From 00f81b9eca840bfa958c8c1c659bddee5c00bc0f Mon Sep 17 00:00:00 2001 From: Anton Kolesnikov Date: Mon, 25 Nov 2024 21:46:46 +0800 Subject: [PATCH] feat(v2): background compaction cleanup (#3694) --- .mockery.yaml | 3 + api/gen/proto/go/metastore/v1/compactor.pb.go | 613 +-- .../go/metastore/v1/compactor_vtproto.pb.go | 1285 +++--- api/gen/proto/go/metastore/v1/index.pb.go | 205 +- .../proto/go/metastore/v1/index_vtproto.pb.go | 418 ++ .../v1/metastorev1connect/index.connect.go | 36 +- .../metastorev1connect/index.connect.mux.go | 5 + .../go/metastore/v1/raft_log/raft_log.pb.go | 1222 +++++- .../v1/raft_log/raft_log_vtproto.pb.go | 3550 ++++++++++++++++- api/gen/proto/go/metastore/v1/types.pb.go | 209 +- .../proto/go/metastore/v1/types_vtproto.pb.go | 265 ++ api/go.mod | 2 +- api/go.sum | 4 +- api/metastore/v1/compactor.proto | 96 +- api/metastore/v1/index.proto | 9 + api/metastore/v1/raft_log/raft_log.proto | 101 +- api/metastore/v1/types.proto | 6 + api/openapiv2/gen/phlare.swagger.json | 200 +- ebpf/go.mod | 2 +- ebpf/go.sum | 4 +- go.mod | 14 +- go.sum | 28 +- pkg/experiment/compactor/compaction_worker.go | 546 ++- .../compactor/compaction_worker_metrics.go | 79 +- pkg/experiment/distributor/distributor.go | 12 + .../ingester/singlereplica/singlereplica.go | 25 - .../metastore/cleaner_raft_handler.go | 114 - pkg/experiment/metastore/cleaner_service.go | 90 - pkg/experiment/metastore/client/methods.go | 6 + .../metastore/client/server_mock_test.go | 4 + pkg/experiment/metastore/compaction/README.md | 318 ++ .../metastore/compaction/compaction.go | 59 + .../compaction/compactor/compaction_queue.go | 412 ++ .../compactor/compaction_queue_bench_test.go | 58 + .../compactor/compaction_queue_test.go | 247 ++ .../compaction/compactor/compactor.go | 140 + .../compactor/compactor_strategy.go | 69 + .../compaction/compactor/compactor_test.go | 122 + .../metastore/compaction/compactor/metrics.go | 68 + .../compaction/compactor/metrics_test.go | 30 + .../metastore/compaction/compactor/plan.go | 143 + .../compaction/compactor/plan_test.go | 266 ++ .../compactor/store/block_queue_store.go | 121 + .../compactor/store/block_queue_store_test.go | 103 + .../metastore/compaction/scheduler/metrics.go | 119 + .../compaction/scheduler/metrics_test.go | 29 + .../compaction/scheduler/schedule.go | 178 + .../compaction/scheduler/schedule_test.go | 321 ++ .../compaction/scheduler/scheduler.go | 153 + .../compaction/scheduler/scheduler_queue.go | 186 + .../scheduler/scheduler_queue_test.go | 151 + .../compaction/scheduler/scheduler_test.go | 89 + .../scheduler/store/job_plan_store.go | 47 + .../scheduler/store/job_plan_store_test.go | 46 + .../scheduler/store/job_state_store.go | 88 + .../scheduler/store/job_state_store_test.go | 51 + .../compaction/scheduler/store/job_store.go | 27 + .../metastore/compaction_planner.go | 115 - pkg/experiment/metastore/compaction_queue.go | 225 -- .../metastore/compaction_queue_test.go | 71 - .../metastore/compaction_raft_handler.go | 794 +--- .../metastore/compaction_service.go | 143 +- .../metastore/compactionpb/compaction.pb.go | 430 -- .../metastore/compactionpb/compaction.proto | 51 - .../compactionpb/compaction_vtproto.pb.go | 690 ---- pkg/experiment/metastore/dlq/recovery.go | 4 +- pkg/experiment/metastore/fsm/boltdb.go | 4 + pkg/experiment/metastore/fsm/fsm.go | 76 +- pkg/experiment/metastore/index/index.go | 223 +- pkg/experiment/metastore/index/index_test.go | 298 +- .../metastore/index/partition_key.go | 37 - .../metastore/index/partition_meta.go | 4 +- 
pkg/experiment/metastore/index/store.go | 159 - .../metastore/index/store/index_store.go | 181 + .../metastore/index/store/partition_key.go | 65 + .../index/store/partition_key_test.go | 67 + .../metastore/index_raft_handler.go | 77 +- pkg/experiment/metastore/index_service.go | 49 +- .../metastore/markers/deletion_markers.go | 243 -- .../markers/deletion_markers_test.go | 39 - pkg/experiment/metastore/metastore.go | 131 +- pkg/experiment/metastore/metastore_raft.go | 12 +- pkg/experiment/metastore/query_service.go | 9 +- pkg/experiment/metastore/raftnode/node.go | 98 +- .../metastore/raftnode/node_bootstrap.go | 56 +- .../metastore/raftnode/node_read.go | 152 +- .../raftnode/raftnodepb/raft_node.pb.go | 138 +- .../raftnode/raftnodepb/raft_node.proto | 3 +- .../raftnodepb/raft_node_vtproto.pb.go | 41 +- pkg/experiment/metastore/raftnode/service.go | 10 +- pkg/experiment/metastore/store/store.go | 44 + .../metastore/storeutils/storeutils.go | 22 - pkg/experiment/metastore/tenant_service.go | 5 +- .../tombstones/store/tombstone_store.go | 103 + .../tombstones/store/tombstone_store_test.go | 101 + .../metastore/tombstones/tombstone_queue.go | 75 + .../metastore/tombstones/tombstones.go | 179 + .../query_backend/block/compaction.go | 109 +- .../query_backend/block/compaction_test.go | 8 +- pkg/experiment/query_backend/block/object.go | 12 +- .../query_backend/block/section_profiles.go | 4 +- pkg/experiment/query_backend/block_reader.go | 44 +- pkg/experiment/query_backend/query.go | 46 +- pkg/objstore/not_found.go | 23 + pkg/phlare/modules_experimental.go | 16 +- pkg/test/boltdb.go | 23 + pkg/test/idempotence.go | 34 + .../mockcompactor/mock_block_queue_store.go | 229 ++ .../mocks/mockcompactor/mock_tombstones.go | 86 + pkg/test/mocks/mockindex/mock_store.go | 222 +- .../mock_index_service_client.go | 74 + .../mock_index_service_server.go | 59 + .../mocks/mockscheduler/mock_job_store.go | 379 ++ pkg/test/time.go | 25 + pkg/util/concurrency.go | 48 + 115 files changed, 14335 insertions(+), 5124 deletions(-) delete mode 100644 pkg/experiment/ingester/singlereplica/singlereplica.go delete mode 100644 pkg/experiment/metastore/cleaner_raft_handler.go delete mode 100644 pkg/experiment/metastore/cleaner_service.go create mode 100644 pkg/experiment/metastore/compaction/README.md create mode 100644 pkg/experiment/metastore/compaction/compaction.go create mode 100644 pkg/experiment/metastore/compaction/compactor/compaction_queue.go create mode 100644 pkg/experiment/metastore/compaction/compactor/compaction_queue_bench_test.go create mode 100644 pkg/experiment/metastore/compaction/compactor/compaction_queue_test.go create mode 100644 pkg/experiment/metastore/compaction/compactor/compactor.go create mode 100644 pkg/experiment/metastore/compaction/compactor/compactor_strategy.go create mode 100644 pkg/experiment/metastore/compaction/compactor/compactor_test.go create mode 100644 pkg/experiment/metastore/compaction/compactor/metrics.go create mode 100644 pkg/experiment/metastore/compaction/compactor/metrics_test.go create mode 100644 pkg/experiment/metastore/compaction/compactor/plan.go create mode 100644 pkg/experiment/metastore/compaction/compactor/plan_test.go create mode 100644 pkg/experiment/metastore/compaction/compactor/store/block_queue_store.go create mode 100644 pkg/experiment/metastore/compaction/compactor/store/block_queue_store_test.go create mode 100644 pkg/experiment/metastore/compaction/scheduler/metrics.go create mode 100644 pkg/experiment/metastore/compaction/scheduler/metrics_test.go 
create mode 100644 pkg/experiment/metastore/compaction/scheduler/schedule.go create mode 100644 pkg/experiment/metastore/compaction/scheduler/schedule_test.go create mode 100644 pkg/experiment/metastore/compaction/scheduler/scheduler.go create mode 100644 pkg/experiment/metastore/compaction/scheduler/scheduler_queue.go create mode 100644 pkg/experiment/metastore/compaction/scheduler/scheduler_queue_test.go create mode 100644 pkg/experiment/metastore/compaction/scheduler/scheduler_test.go create mode 100644 pkg/experiment/metastore/compaction/scheduler/store/job_plan_store.go create mode 100644 pkg/experiment/metastore/compaction/scheduler/store/job_plan_store_test.go create mode 100644 pkg/experiment/metastore/compaction/scheduler/store/job_state_store.go create mode 100644 pkg/experiment/metastore/compaction/scheduler/store/job_state_store_test.go create mode 100644 pkg/experiment/metastore/compaction/scheduler/store/job_store.go delete mode 100644 pkg/experiment/metastore/compaction_planner.go delete mode 100644 pkg/experiment/metastore/compaction_queue.go delete mode 100644 pkg/experiment/metastore/compaction_queue_test.go delete mode 100644 pkg/experiment/metastore/compactionpb/compaction.pb.go delete mode 100644 pkg/experiment/metastore/compactionpb/compaction.proto delete mode 100644 pkg/experiment/metastore/compactionpb/compaction_vtproto.pb.go delete mode 100644 pkg/experiment/metastore/index/partition_key.go delete mode 100644 pkg/experiment/metastore/index/store.go create mode 100644 pkg/experiment/metastore/index/store/index_store.go create mode 100644 pkg/experiment/metastore/index/store/partition_key.go create mode 100644 pkg/experiment/metastore/index/store/partition_key_test.go delete mode 100644 pkg/experiment/metastore/markers/deletion_markers.go delete mode 100644 pkg/experiment/metastore/markers/deletion_markers_test.go create mode 100644 pkg/experiment/metastore/store/store.go delete mode 100644 pkg/experiment/metastore/storeutils/storeutils.go create mode 100644 pkg/experiment/metastore/tombstones/store/tombstone_store.go create mode 100644 pkg/experiment/metastore/tombstones/store/tombstone_store_test.go create mode 100644 pkg/experiment/metastore/tombstones/tombstone_queue.go create mode 100644 pkg/experiment/metastore/tombstones/tombstones.go create mode 100644 pkg/objstore/not_found.go create mode 100644 pkg/test/boltdb.go create mode 100644 pkg/test/idempotence.go create mode 100644 pkg/test/mocks/mockcompactor/mock_block_queue_store.go create mode 100644 pkg/test/mocks/mockcompactor/mock_tombstones.go create mode 100644 pkg/test/mocks/mockscheduler/mock_job_store.go create mode 100644 pkg/test/time.go create mode 100644 pkg/util/concurrency.go diff --git a/.mockery.yaml b/.mockery.yaml index 0fa2a207d3..383aebb6a8 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -33,6 +33,9 @@ packages: github.com/grafana/pyroscope/pkg/experiment/metastore/index: interfaces: Store: + github.com/grafana/pyroscope/pkg/experiment/distributor/placement: + interfaces: + Placement: github.com/grafana/pyroscope/pkg/experiment/distributor/placement/adaptive_placement: interfaces: Store: diff --git a/api/gen/proto/go/metastore/v1/compactor.pb.go b/api/gen/proto/go/metastore/v1/compactor.pb.go index e2bcabd6f4..ccc897dc28 100644 --- a/api/gen/proto/go/metastore/v1/compactor.pb.go +++ b/api/gen/proto/go/metastore/v1/compactor.pb.go @@ -20,58 +20,52 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -type CompactionStatus int32 +type CompactionJobStatus int32 const ( - 
CompactionStatus_COMPACTION_STATUS_UNSPECIFIED CompactionStatus = 0 - CompactionStatus_COMPACTION_STATUS_IN_PROGRESS CompactionStatus = 1 - CompactionStatus_COMPACTION_STATUS_SUCCESS CompactionStatus = 2 - CompactionStatus_COMPACTION_STATUS_FAILURE CompactionStatus = 3 - CompactionStatus_COMPACTION_STATUS_CANCELLED CompactionStatus = 4 + CompactionJobStatus_COMPACTION_STATUS_UNSPECIFIED CompactionJobStatus = 0 + CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS CompactionJobStatus = 1 + CompactionJobStatus_COMPACTION_STATUS_SUCCESS CompactionJobStatus = 2 ) -// Enum value maps for CompactionStatus. +// Enum value maps for CompactionJobStatus. var ( - CompactionStatus_name = map[int32]string{ + CompactionJobStatus_name = map[int32]string{ 0: "COMPACTION_STATUS_UNSPECIFIED", 1: "COMPACTION_STATUS_IN_PROGRESS", 2: "COMPACTION_STATUS_SUCCESS", - 3: "COMPACTION_STATUS_FAILURE", - 4: "COMPACTION_STATUS_CANCELLED", } - CompactionStatus_value = map[string]int32{ + CompactionJobStatus_value = map[string]int32{ "COMPACTION_STATUS_UNSPECIFIED": 0, "COMPACTION_STATUS_IN_PROGRESS": 1, "COMPACTION_STATUS_SUCCESS": 2, - "COMPACTION_STATUS_FAILURE": 3, - "COMPACTION_STATUS_CANCELLED": 4, } ) -func (x CompactionStatus) Enum() *CompactionStatus { - p := new(CompactionStatus) +func (x CompactionJobStatus) Enum() *CompactionJobStatus { + p := new(CompactionJobStatus) *p = x return p } -func (x CompactionStatus) String() string { +func (x CompactionJobStatus) String() string { return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -func (CompactionStatus) Descriptor() protoreflect.EnumDescriptor { +func (CompactionJobStatus) Descriptor() protoreflect.EnumDescriptor { return file_metastore_v1_compactor_proto_enumTypes[0].Descriptor() } -func (CompactionStatus) Type() protoreflect.EnumType { +func (CompactionJobStatus) Type() protoreflect.EnumType { return &file_metastore_v1_compactor_proto_enumTypes[0] } -func (x CompactionStatus) Number() protoreflect.EnumNumber { +func (x CompactionJobStatus) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } -// Deprecated: Use CompactionStatus.Descriptor instead. -func (CompactionStatus) EnumDescriptor() ([]byte, []int) { +// Deprecated: Use CompactionJobStatus.Descriptor instead. +func (CompactionJobStatus) EnumDescriptor() ([]byte, []int) { return file_metastore_v1_compactor_proto_rawDescGZIP(), []int{0} } @@ -80,8 +74,7 @@ type PollCompactionJobsRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // A batch of status updates for in-progress jobs from a worker. - JobStatusUpdates []*CompactionJobStatus `protobuf:"bytes,1,rep,name=job_status_updates,json=jobStatusUpdates,proto3" json:"job_status_updates,omitempty"` + StatusUpdates []*CompactionJobStatusUpdate `protobuf:"bytes,1,rep,name=status_updates,json=statusUpdates,proto3" json:"status_updates,omitempty"` // How many new jobs a worker can be assigned to. 
JobCapacity uint32 `protobuf:"varint,2,opt,name=job_capacity,json=jobCapacity,proto3" json:"job_capacity,omitempty"` } @@ -118,9 +111,9 @@ func (*PollCompactionJobsRequest) Descriptor() ([]byte, []int) { return file_metastore_v1_compactor_proto_rawDescGZIP(), []int{0} } -func (x *PollCompactionJobsRequest) GetJobStatusUpdates() []*CompactionJobStatus { +func (x *PollCompactionJobsRequest) GetStatusUpdates() []*CompactionJobStatusUpdate { if x != nil { - return x.JobStatusUpdates + return x.StatusUpdates } return nil } @@ -137,7 +130,8 @@ type PollCompactionJobsResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - CompactionJobs []*CompactionJob `protobuf:"bytes,1,rep,name=compaction_jobs,json=compactionJobs,proto3" json:"compaction_jobs,omitempty"` + CompactionJobs []*CompactionJob `protobuf:"bytes,1,rep,name=compaction_jobs,json=compactionJobs,proto3" json:"compaction_jobs,omitempty"` + Assignments []*CompactionJobAssignment `protobuf:"bytes,2,rep,name=assignments,proto3" json:"assignments,omitempty"` } func (x *PollCompactionJobsResponse) Reset() { @@ -179,14 +173,28 @@ func (x *PollCompactionJobsResponse) GetCompactionJobs() []*CompactionJob { return nil } -type GetCompactionRequest struct { +func (x *PollCompactionJobsResponse) GetAssignments() []*CompactionJobAssignment { + if x != nil { + return x.Assignments + } + return nil +} + +type CompactionJob struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Shard uint32 `protobuf:"varint,2,opt,name=shard,proto3" json:"shard,omitempty"` + Tenant string `protobuf:"bytes,3,opt,name=tenant,proto3" json:"tenant,omitempty"` + CompactionLevel uint32 `protobuf:"varint,4,opt,name=compaction_level,json=compactionLevel,proto3" json:"compaction_level,omitempty"` + SourceBlocks []string `protobuf:"bytes,5,rep,name=source_blocks,json=sourceBlocks,proto3" json:"source_blocks,omitempty"` + Tombstones []*Tombstones `protobuf:"bytes,6,rep,name=tombstones,proto3" json:"tombstones,omitempty"` } -func (x *GetCompactionRequest) Reset() { - *x = GetCompactionRequest{} +func (x *CompactionJob) Reset() { + *x = CompactionJob{} if protoimpl.UnsafeEnabled { mi := &file_metastore_v1_compactor_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -194,13 +202,13 @@ func (x *GetCompactionRequest) Reset() { } } -func (x *GetCompactionRequest) String() string { +func (x *CompactionJob) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetCompactionRequest) ProtoMessage() {} +func (*CompactionJob) ProtoMessage() {} -func (x *GetCompactionRequest) ProtoReflect() protoreflect.Message { +func (x *CompactionJob) ProtoReflect() protoreflect.Message { mi := &file_metastore_v1_compactor_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -212,22 +220,64 @@ func (x *GetCompactionRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetCompactionRequest.ProtoReflect.Descriptor instead. -func (*GetCompactionRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use CompactionJob.ProtoReflect.Descriptor instead. 
+func (*CompactionJob) Descriptor() ([]byte, []int) { return file_metastore_v1_compactor_proto_rawDescGZIP(), []int{2} } -type GetCompactionResponse struct { +func (x *CompactionJob) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CompactionJob) GetShard() uint32 { + if x != nil { + return x.Shard + } + return 0 +} + +func (x *CompactionJob) GetTenant() string { + if x != nil { + return x.Tenant + } + return "" +} + +func (x *CompactionJob) GetCompactionLevel() uint32 { + if x != nil { + return x.CompactionLevel + } + return 0 +} + +func (x *CompactionJob) GetSourceBlocks() []string { + if x != nil { + return x.SourceBlocks + } + return nil +} + +func (x *CompactionJob) GetTombstones() []*Tombstones { + if x != nil { + return x.Tombstones + } + return nil +} + +// Tombstones represent objects removed from the index but still stored. +type Tombstones struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // A list of all compaction jobs - CompactionJobs []*CompactionJob `protobuf:"bytes,1,rep,name=compaction_jobs,json=compactionJobs,proto3" json:"compaction_jobs,omitempty"` + Blocks *BlockTombstones `protobuf:"bytes,1,opt,name=blocks,proto3" json:"blocks,omitempty"` } -func (x *GetCompactionResponse) Reset() { - *x = GetCompactionResponse{} +func (x *Tombstones) Reset() { + *x = Tombstones{} if protoimpl.UnsafeEnabled { mi := &file_metastore_v1_compactor_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -235,13 +285,13 @@ func (x *GetCompactionResponse) Reset() { } } -func (x *GetCompactionResponse) String() string { +func (x *Tombstones) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetCompactionResponse) ProtoMessage() {} +func (*Tombstones) ProtoMessage() {} -func (x *GetCompactionResponse) ProtoReflect() protoreflect.Message { +func (x *Tombstones) ProtoReflect() protoreflect.Message { mi := &file_metastore_v1_compactor_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -253,41 +303,32 @@ func (x *GetCompactionResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetCompactionResponse.ProtoReflect.Descriptor instead. -func (*GetCompactionResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use Tombstones.ProtoReflect.Descriptor instead. +func (*Tombstones) Descriptor() ([]byte, []int) { return file_metastore_v1_compactor_proto_rawDescGZIP(), []int{3} } -func (x *GetCompactionResponse) GetCompactionJobs() []*CompactionJob { +func (x *Tombstones) GetBlocks() *BlockTombstones { if x != nil { - return x.CompactionJobs + return x.Blocks } return nil } -// One compaction job may result in multiple output blocks. -type CompactionJob struct { +type BlockTombstones struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Unique name of the job. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Options *CompactionOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` - // List of the input blocks. - Blocks []*BlockMeta `protobuf:"bytes,3,rep,name=blocks,proto3" json:"blocks,omitempty"` - Status *CompactionJobStatus `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"` - // Fencing token. 
- RaftLogIndex uint64 `protobuf:"varint,5,opt,name=raft_log_index,json=raftLogIndex,proto3" json:"raft_log_index,omitempty"` - // Shard the blocks belong to. - Shard uint32 `protobuf:"varint,6,opt,name=shard,proto3" json:"shard,omitempty"` - // Optional, empty for compaction level 0. - TenantId string `protobuf:"bytes,7,opt,name=tenant_id,json=tenantId,proto3" json:"tenant_id,omitempty"` - CompactionLevel uint32 `protobuf:"varint,8,opt,name=compaction_level,json=compactionLevel,proto3" json:"compaction_level,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Shard uint32 `protobuf:"varint,2,opt,name=shard,proto3" json:"shard,omitempty"` + Tenant string `protobuf:"bytes,3,opt,name=tenant,proto3" json:"tenant,omitempty"` + CompactionLevel uint32 `protobuf:"varint,4,opt,name=compaction_level,json=compactionLevel,proto3" json:"compaction_level,omitempty"` + Blocks []string `protobuf:"bytes,5,rep,name=blocks,proto3" json:"blocks,omitempty"` } -func (x *CompactionJob) Reset() { - *x = CompactionJob{} +func (x *BlockTombstones) Reset() { + *x = BlockTombstones{} if protoimpl.UnsafeEnabled { mi := &file_metastore_v1_compactor_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -295,13 +336,13 @@ func (x *CompactionJob) Reset() { } } -func (x *CompactionJob) String() string { +func (x *BlockTombstones) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CompactionJob) ProtoMessage() {} +func (*BlockTombstones) ProtoMessage() {} -func (x *CompactionJob) ProtoReflect() protoreflect.Message { +func (x *BlockTombstones) ProtoReflect() protoreflect.Message { mi := &file_metastore_v1_compactor_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -313,80 +354,58 @@ func (x *CompactionJob) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CompactionJob.ProtoReflect.Descriptor instead. -func (*CompactionJob) Descriptor() ([]byte, []int) { +// Deprecated: Use BlockTombstones.ProtoReflect.Descriptor instead. 
+func (*BlockTombstones) Descriptor() ([]byte, []int) { return file_metastore_v1_compactor_proto_rawDescGZIP(), []int{4} } -func (x *CompactionJob) GetName() string { +func (x *BlockTombstones) GetName() string { if x != nil { return x.Name } return "" } -func (x *CompactionJob) GetOptions() *CompactionOptions { - if x != nil { - return x.Options - } - return nil -} - -func (x *CompactionJob) GetBlocks() []*BlockMeta { - if x != nil { - return x.Blocks - } - return nil -} - -func (x *CompactionJob) GetStatus() *CompactionJobStatus { - if x != nil { - return x.Status - } - return nil -} - -func (x *CompactionJob) GetRaftLogIndex() uint64 { - if x != nil { - return x.RaftLogIndex - } - return 0 -} - -func (x *CompactionJob) GetShard() uint32 { +func (x *BlockTombstones) GetShard() uint32 { if x != nil { return x.Shard } return 0 } -func (x *CompactionJob) GetTenantId() string { +func (x *BlockTombstones) GetTenant() string { if x != nil { - return x.TenantId + return x.Tenant } return "" } -func (x *CompactionJob) GetCompactionLevel() uint32 { +func (x *BlockTombstones) GetCompactionLevel() uint32 { if x != nil { return x.CompactionLevel } return 0 } -type CompactionOptions struct { +func (x *BlockTombstones) GetBlocks() []string { + if x != nil { + return x.Blocks + } + return nil +} + +type CompactionJobAssignment struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // How often the compaction worker should update - // the job status. If overdue, the job ownership - // is revoked. - StatusUpdateIntervalSeconds uint64 `protobuf:"varint,1,opt,name=status_update_interval_seconds,json=statusUpdateIntervalSeconds,proto3" json:"status_update_interval_seconds,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Token uint64 `protobuf:"varint,2,opt,name=token,proto3" json:"token,omitempty"` + LeaseExpiresAt int64 `protobuf:"varint,3,opt,name=lease_expires_at,json=leaseExpiresAt,proto3" json:"lease_expires_at,omitempty"` } -func (x *CompactionOptions) Reset() { - *x = CompactionOptions{} +func (x *CompactionJobAssignment) Reset() { + *x = CompactionJobAssignment{} if protoimpl.UnsafeEnabled { mi := &file_metastore_v1_compactor_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -394,13 +413,13 @@ func (x *CompactionOptions) Reset() { } } -func (x *CompactionOptions) String() string { +func (x *CompactionJobAssignment) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CompactionOptions) ProtoMessage() {} +func (*CompactionJobAssignment) ProtoMessage() {} -func (x *CompactionOptions) ProtoReflect() protoreflect.Message { +func (x *CompactionJobAssignment) ProtoReflect() protoreflect.Message { mi := &file_metastore_v1_compactor_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -412,49 +431,46 @@ func (x *CompactionOptions) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CompactionOptions.ProtoReflect.Descriptor instead. -func (*CompactionOptions) Descriptor() ([]byte, []int) { +// Deprecated: Use CompactionJobAssignment.ProtoReflect.Descriptor instead. 
+func (*CompactionJobAssignment) Descriptor() ([]byte, []int) { return file_metastore_v1_compactor_proto_rawDescGZIP(), []int{5} } -func (x *CompactionOptions) GetStatusUpdateIntervalSeconds() uint64 { +func (x *CompactionJobAssignment) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CompactionJobAssignment) GetToken() uint64 { + if x != nil { + return x.Token + } + return 0 +} + +func (x *CompactionJobAssignment) GetLeaseExpiresAt() int64 { if x != nil { - return x.StatusUpdateIntervalSeconds + return x.LeaseExpiresAt } return 0 } -type CompactionJobStatus struct { +type CompactionJobStatusUpdate struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - JobName string `protobuf:"bytes,1,opt,name=job_name,json=jobName,proto3" json:"job_name,omitempty"` - // Status update allows the planner to keep - // track of the job ownership and compaction - // progress: - // - If the job status is other than IN_PROGRESS, - // the ownership of the job is revoked. - // - FAILURE must only be sent if the failure is - // persistent and the compaction can't be accomplished. - // - completed_job must be empty if the status is - // other than SUCCESS, and vice-versa. - // - UNSPECIFIED must be sent if the worker rejects - // or cancels the compaction job. - // - // Partial results/status is not allowed. - Status CompactionStatus `protobuf:"varint,2,opt,name=status,proto3,enum=metastore.v1.CompactionStatus" json:"status,omitempty"` - CompletedJob *CompletedJob `protobuf:"bytes,3,opt,name=completed_job,json=completedJob,proto3" json:"completed_job,omitempty"` - // Fencing token. - RaftLogIndex uint64 `protobuf:"varint,4,opt,name=raft_log_index,json=raftLogIndex,proto3" json:"raft_log_index,omitempty"` - // Shard the blocks belong to. - Shard uint32 `protobuf:"varint,5,opt,name=shard,proto3" json:"shard,omitempty"` - // Optional, empty for compaction level 0. - TenantId string `protobuf:"bytes,6,opt,name=tenant_id,json=tenantId,proto3" json:"tenant_id,omitempty"` -} - -func (x *CompactionJobStatus) Reset() { - *x = CompactionJobStatus{} + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Token uint64 `protobuf:"varint,2,opt,name=token,proto3" json:"token,omitempty"` + Status CompactionJobStatus `protobuf:"varint,3,opt,name=status,proto3,enum=metastore.v1.CompactionJobStatus" json:"status,omitempty"` + // Only present if the job completed successfully. 
+ CompactedBlocks *CompactedBlocks `protobuf:"bytes,4,opt,name=compacted_blocks,json=compactedBlocks,proto3" json:"compacted_blocks,omitempty"` +} + +func (x *CompactionJobStatusUpdate) Reset() { + *x = CompactionJobStatusUpdate{} if protoimpl.UnsafeEnabled { mi := &file_metastore_v1_compactor_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -462,13 +478,13 @@ func (x *CompactionJobStatus) Reset() { } } -func (x *CompactionJobStatus) String() string { +func (x *CompactionJobStatusUpdate) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CompactionJobStatus) ProtoMessage() {} +func (*CompactionJobStatusUpdate) ProtoMessage() {} -func (x *CompactionJobStatus) ProtoReflect() protoreflect.Message { +func (x *CompactionJobStatusUpdate) ProtoReflect() protoreflect.Message { mi := &file_metastore_v1_compactor_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -480,63 +496,50 @@ func (x *CompactionJobStatus) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CompactionJobStatus.ProtoReflect.Descriptor instead. -func (*CompactionJobStatus) Descriptor() ([]byte, []int) { +// Deprecated: Use CompactionJobStatusUpdate.ProtoReflect.Descriptor instead. +func (*CompactionJobStatusUpdate) Descriptor() ([]byte, []int) { return file_metastore_v1_compactor_proto_rawDescGZIP(), []int{6} } -func (x *CompactionJobStatus) GetJobName() string { +func (x *CompactionJobStatusUpdate) GetName() string { if x != nil { - return x.JobName + return x.Name } return "" } -func (x *CompactionJobStatus) GetStatus() CompactionStatus { - if x != nil { - return x.Status - } - return CompactionStatus_COMPACTION_STATUS_UNSPECIFIED -} - -func (x *CompactionJobStatus) GetCompletedJob() *CompletedJob { - if x != nil { - return x.CompletedJob - } - return nil -} - -func (x *CompactionJobStatus) GetRaftLogIndex() uint64 { +func (x *CompactionJobStatusUpdate) GetToken() uint64 { if x != nil { - return x.RaftLogIndex + return x.Token } return 0 } -func (x *CompactionJobStatus) GetShard() uint32 { +func (x *CompactionJobStatusUpdate) GetStatus() CompactionJobStatus { if x != nil { - return x.Shard + return x.Status } - return 0 + return CompactionJobStatus_COMPACTION_STATUS_UNSPECIFIED } -func (x *CompactionJobStatus) GetTenantId() string { +func (x *CompactionJobStatusUpdate) GetCompactedBlocks() *CompactedBlocks { if x != nil { - return x.TenantId + return x.CompactedBlocks } - return "" + return nil } -type CompletedJob struct { +type CompactedBlocks struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Blocks []*BlockMeta `protobuf:"bytes,1,rep,name=blocks,proto3" json:"blocks,omitempty"` + SourceBlocks *BlockList `protobuf:"bytes,1,opt,name=source_blocks,json=sourceBlocks,proto3" json:"source_blocks,omitempty"` + NewBlocks []*BlockMeta `protobuf:"bytes,2,rep,name=new_blocks,json=newBlocks,proto3" json:"new_blocks,omitempty"` } -func (x *CompletedJob) Reset() { - *x = CompletedJob{} +func (x *CompactedBlocks) Reset() { + *x = CompactedBlocks{} if protoimpl.UnsafeEnabled { mi := &file_metastore_v1_compactor_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -544,13 +547,13 @@ func (x *CompletedJob) Reset() { } } -func (x *CompletedJob) String() string { +func (x *CompactedBlocks) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CompletedJob) ProtoMessage() {} +func (*CompactedBlocks) ProtoMessage() {} 
-func (x *CompletedJob) ProtoReflect() protoreflect.Message { +func (x *CompactedBlocks) ProtoReflect() protoreflect.Message { mi := &file_metastore_v1_compactor_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -562,14 +565,21 @@ func (x *CompletedJob) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CompletedJob.ProtoReflect.Descriptor instead. -func (*CompletedJob) Descriptor() ([]byte, []int) { +// Deprecated: Use CompactedBlocks.ProtoReflect.Descriptor instead. +func (*CompactedBlocks) Descriptor() ([]byte, []int) { return file_metastore_v1_compactor_proto_rawDescGZIP(), []int{7} } -func (x *CompletedJob) GetBlocks() []*BlockMeta { +func (x *CompactedBlocks) GetSourceBlocks() *BlockList { if x != nil { - return x.Blocks + return x.SourceBlocks + } + return nil +} + +func (x *CompactedBlocks) GetNewBlocks() []*BlockMeta { + if x != nil { + return x.NewBlocks } return nil } @@ -581,109 +591,111 @@ var file_metastore_v1_compactor_proto_rawDesc = []byte{ 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x18, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8f, 0x01, 0x0a, 0x19, 0x50, 0x6f, 0x6c, 0x6c, 0x43, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8e, 0x01, 0x0a, 0x19, 0x50, 0x6f, 0x6c, 0x6c, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x4f, 0x0a, 0x12, 0x6a, 0x6f, 0x62, 0x5f, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x21, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x10, 0x6a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x6f, 0x62, 0x5f, 0x63, 0x61, 0x70, - 0x61, 0x63, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6a, 0x6f, 0x62, - 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x22, 0x62, 0x0a, 0x1a, 0x50, 0x6f, 0x6c, 0x6c, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6d, + 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x70, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x6f, 0x62, 0x5f, 0x63, 0x61, 0x70, 0x61, + 0x63, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6a, 0x6f, 0x62, 0x43, + 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x22, 0xab, 0x01, 0x0a, 0x1a, 0x50, 0x6f, 0x6c, 0x6c, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6a, 0x6f, 0x62, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 
0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x52, 0x0e, 0x63, 0x6f, - 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x73, 0x22, 0x16, 0x0a, 0x14, - 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x22, 0x5d, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, - 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6a, 0x6f, 0x62, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x4a, 0x6f, 0x62, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, - 0x6f, 0x62, 0x73, 0x22, 0xce, 0x02, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6d, 0x65, 0x74, - 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x06, 0x62, - 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, - 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x24, 0x0a, 0x0e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x6e, 0x64, - 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x72, 0x61, 0x66, 0x74, 0x4c, 0x6f, - 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1b, 0x0a, 0x09, - 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, - 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x08, 0x20, + 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x73, 0x12, 0x47, 0x0a, 0x0b, + 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x41, 0x73, + 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xdb, 0x01, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x12, 0x12, 0x0a, 0x04, 0x6e, 
0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, + 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, - 0x65, 0x76, 0x65, 0x6c, 0x22, 0x58, 0x0a, 0x11, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x43, 0x0a, 0x1e, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x76, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x1b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, - 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x82, - 0x02, 0x0a, 0x13, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6a, 0x6f, 0x62, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x36, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x1e, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3f, 0x0a, 0x0d, 0x63, 0x6f, 0x6d, - 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6a, 0x6f, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4a, 0x6f, 0x62, 0x52, 0x0c, 0x63, 0x6f, - 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4a, 0x6f, 0x62, 0x12, 0x24, 0x0a, 0x0e, 0x72, 0x61, - 0x66, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x0c, 0x72, 0x61, 0x66, 0x74, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78, - 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, - 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, - 0x74, 0x49, 0x64, 0x22, 0x3f, 0x0a, 0x0c, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, - 0x4a, 0x6f, 0x62, 0x12, 0x2f, 0x0a, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x01, 0x20, + 0x65, 0x76, 0x65, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, + 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x38, 0x0a, 0x0a, 0x74, 0x6f, 0x6d, + 0x62, 0x73, 0x74, 0x6f, 0x6e, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, + 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x6d, + 0x62, 0x73, 0x74, 0x6f, 0x6e, 0x65, 0x73, 0x52, 0x0a, 0x74, 0x6f, 0x6d, 0x62, 0x73, 0x74, 0x6f, + 
0x6e, 0x65, 0x73, 0x22, 0x43, 0x0a, 0x0a, 0x54, 0x6f, 0x6d, 0x62, 0x73, 0x74, 0x6f, 0x6e, 0x65, + 0x73, 0x12, 0x35, 0x0a, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x6f, 0x6d, 0x62, 0x73, 0x74, 0x6f, 0x6e, 0x65, 0x73, + 0x52, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x22, 0x96, 0x01, 0x0a, 0x0f, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x54, 0x6f, 0x6d, 0x62, 0x73, 0x74, 0x6f, 0x6e, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x12, 0x29, + 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, + 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x73, 0x22, 0x6d, 0x0a, 0x17, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, + 0x6f, 0x62, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x5f, + 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, + 0x22, 0xca, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, + 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x48, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x65, 0x64, + 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6d, + 0x70, 0x61, 0x63, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x0f, 0x63, 0x6f, + 0x6d, 0x70, 0x61, 0x63, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x22, 0x87, 0x01, + 0x0a, 0x0f, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x73, 0x12, 0x3c, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x73, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x69, 0x73, + 0x74, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x12, + 0x36, 0x0a, 0x0a, 0x6e, 0x65, 0x77, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x06, 0x62, 0x6c, - 0x6f, 0x63, 0x6b, 0x73, 0x2a, 0xb7, 0x01, 0x0a, 0x10, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x4f, 0x4d, - 0x50, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, - 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, - 0x53, 0x5f, 0x49, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, - 0x1d, 0x0a, 0x19, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, - 0x41, 0x54, 0x55, 0x53, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x02, 0x12, 0x1d, - 0x0a, 0x19, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, - 0x54, 0x55, 0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x03, 0x12, 0x1f, 0x0a, - 0x1b, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, - 0x55, 0x53, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x32, 0x7e, - 0x0a, 0x11, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0x69, 0x0a, 0x12, 0x50, 0x6f, 0x6c, 0x6c, 0x43, 0x6f, 0x6d, 0x70, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x73, 0x12, 0x27, 0x2e, 0x6d, 0x65, 0x74, 0x61, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x6c, 0x43, 0x6f, 0x6d, - 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x6c, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x4a, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0xbb, - 0x01, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x67, 0x72, 0x61, 0x66, 0x61, 0x6e, 0x61, 0x2f, 0x70, 0x79, 0x72, 0x6f, 0x73, 0x63, - 0x6f, 0x70, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x76, - 0x31, 0x3b, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x76, 0x31, 0xa2, 0x02, 0x03, - 0x4d, 0x58, 0x58, 0xaa, 0x02, 0x0c, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, - 0x56, 0x31, 0xca, 0x02, 0x0c, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5c, 0x56, - 0x31, 0xe2, 0x02, 0x18, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5c, 0x56, 0x31, - 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 
0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0d, 0x4d, - 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x09, 0x6e, 0x65, + 0x77, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x2a, 0x7a, 0x0a, 0x13, 0x43, 0x6f, 0x6d, 0x70, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x21, + 0x0a, 0x1d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, + 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x49, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x47, 0x52, 0x45, + 0x53, 0x53, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x43, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, + 0x53, 0x10, 0x02, 0x32, 0x7e, 0x0a, 0x11, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x69, 0x0a, 0x12, 0x50, 0x6f, 0x6c, 0x6c, + 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x73, 0x12, 0x27, + 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, + 0x6c, 0x6c, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x6c, 0x43, 0x6f, 0x6d, 0x70, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x42, 0xbb, 0x01, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x65, 0x74, 0x61, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x61, 0x66, 0x61, 0x6e, 0x61, 0x2f, 0x70, + 0x79, 0x72, 0x6f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x67, 0x65, 0x6e, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x76, 0x31, 0xa2, 0x02, 0x03, 0x4d, 0x58, 0x58, 0xaa, 0x02, 0x0c, 0x4d, 0x65, 0x74, 0x61, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x0c, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x18, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0xea, 0x02, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x56, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -701,27 +713,28 @@ func file_metastore_v1_compactor_proto_rawDescGZIP() []byte { var file_metastore_v1_compactor_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_metastore_v1_compactor_proto_msgTypes = make([]protoimpl.MessageInfo, 8) var file_metastore_v1_compactor_proto_goTypes = []any{ - (CompactionStatus)(0), // 0: metastore.v1.CompactionStatus + (CompactionJobStatus)(0), // 0: metastore.v1.CompactionJobStatus (*PollCompactionJobsRequest)(nil), // 1: 
metastore.v1.PollCompactionJobsRequest (*PollCompactionJobsResponse)(nil), // 2: metastore.v1.PollCompactionJobsResponse - (*GetCompactionRequest)(nil), // 3: metastore.v1.GetCompactionRequest - (*GetCompactionResponse)(nil), // 4: metastore.v1.GetCompactionResponse - (*CompactionJob)(nil), // 5: metastore.v1.CompactionJob - (*CompactionOptions)(nil), // 6: metastore.v1.CompactionOptions - (*CompactionJobStatus)(nil), // 7: metastore.v1.CompactionJobStatus - (*CompletedJob)(nil), // 8: metastore.v1.CompletedJob - (*BlockMeta)(nil), // 9: metastore.v1.BlockMeta + (*CompactionJob)(nil), // 3: metastore.v1.CompactionJob + (*Tombstones)(nil), // 4: metastore.v1.Tombstones + (*BlockTombstones)(nil), // 5: metastore.v1.BlockTombstones + (*CompactionJobAssignment)(nil), // 6: metastore.v1.CompactionJobAssignment + (*CompactionJobStatusUpdate)(nil), // 7: metastore.v1.CompactionJobStatusUpdate + (*CompactedBlocks)(nil), // 8: metastore.v1.CompactedBlocks + (*BlockList)(nil), // 9: metastore.v1.BlockList + (*BlockMeta)(nil), // 10: metastore.v1.BlockMeta } var file_metastore_v1_compactor_proto_depIdxs = []int32{ - 7, // 0: metastore.v1.PollCompactionJobsRequest.job_status_updates:type_name -> metastore.v1.CompactionJobStatus - 5, // 1: metastore.v1.PollCompactionJobsResponse.compaction_jobs:type_name -> metastore.v1.CompactionJob - 5, // 2: metastore.v1.GetCompactionResponse.compaction_jobs:type_name -> metastore.v1.CompactionJob - 6, // 3: metastore.v1.CompactionJob.options:type_name -> metastore.v1.CompactionOptions - 9, // 4: metastore.v1.CompactionJob.blocks:type_name -> metastore.v1.BlockMeta - 7, // 5: metastore.v1.CompactionJob.status:type_name -> metastore.v1.CompactionJobStatus - 0, // 6: metastore.v1.CompactionJobStatus.status:type_name -> metastore.v1.CompactionStatus - 8, // 7: metastore.v1.CompactionJobStatus.completed_job:type_name -> metastore.v1.CompletedJob - 9, // 8: metastore.v1.CompletedJob.blocks:type_name -> metastore.v1.BlockMeta + 7, // 0: metastore.v1.PollCompactionJobsRequest.status_updates:type_name -> metastore.v1.CompactionJobStatusUpdate + 3, // 1: metastore.v1.PollCompactionJobsResponse.compaction_jobs:type_name -> metastore.v1.CompactionJob + 6, // 2: metastore.v1.PollCompactionJobsResponse.assignments:type_name -> metastore.v1.CompactionJobAssignment + 4, // 3: metastore.v1.CompactionJob.tombstones:type_name -> metastore.v1.Tombstones + 5, // 4: metastore.v1.Tombstones.blocks:type_name -> metastore.v1.BlockTombstones + 0, // 5: metastore.v1.CompactionJobStatusUpdate.status:type_name -> metastore.v1.CompactionJobStatus + 8, // 6: metastore.v1.CompactionJobStatusUpdate.compacted_blocks:type_name -> metastore.v1.CompactedBlocks + 9, // 7: metastore.v1.CompactedBlocks.source_blocks:type_name -> metastore.v1.BlockList + 10, // 8: metastore.v1.CompactedBlocks.new_blocks:type_name -> metastore.v1.BlockMeta 1, // 9: metastore.v1.CompactionService.PollCompactionJobs:input_type -> metastore.v1.PollCompactionJobsRequest 2, // 10: metastore.v1.CompactionService.PollCompactionJobs:output_type -> metastore.v1.PollCompactionJobsResponse 10, // [10:11] is the sub-list for method output_type @@ -763,7 +776,7 @@ func file_metastore_v1_compactor_proto_init() { } } file_metastore_v1_compactor_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*GetCompactionRequest); i { + switch v := v.(*CompactionJob); i { case 0: return &v.state case 1: @@ -775,7 +788,7 @@ func file_metastore_v1_compactor_proto_init() { } } file_metastore_v1_compactor_proto_msgTypes[3].Exporter = 
func(v any, i int) any { - switch v := v.(*GetCompactionResponse); i { + switch v := v.(*Tombstones); i { case 0: return &v.state case 1: @@ -787,7 +800,7 @@ func file_metastore_v1_compactor_proto_init() { } } file_metastore_v1_compactor_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*CompactionJob); i { + switch v := v.(*BlockTombstones); i { case 0: return &v.state case 1: @@ -799,7 +812,7 @@ func file_metastore_v1_compactor_proto_init() { } } file_metastore_v1_compactor_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*CompactionOptions); i { + switch v := v.(*CompactionJobAssignment); i { case 0: return &v.state case 1: @@ -811,7 +824,7 @@ func file_metastore_v1_compactor_proto_init() { } } file_metastore_v1_compactor_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*CompactionJobStatus); i { + switch v := v.(*CompactionJobStatusUpdate); i { case 0: return &v.state case 1: @@ -823,7 +836,7 @@ func file_metastore_v1_compactor_proto_init() { } } file_metastore_v1_compactor_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*CompletedJob); i { + switch v := v.(*CompactedBlocks); i { case 0: return &v.state case 1: diff --git a/api/gen/proto/go/metastore/v1/compactor_vtproto.pb.go b/api/gen/proto/go/metastore/v1/compactor_vtproto.pb.go index dcacf95790..5987cc46aa 100644 --- a/api/gen/proto/go/metastore/v1/compactor_vtproto.pb.go +++ b/api/gen/proto/go/metastore/v1/compactor_vtproto.pb.go @@ -29,12 +29,12 @@ func (m *PollCompactionJobsRequest) CloneVT() *PollCompactionJobsRequest { } r := new(PollCompactionJobsRequest) r.JobCapacity = m.JobCapacity - if rhs := m.JobStatusUpdates; rhs != nil { - tmpContainer := make([]*CompactionJobStatus, len(rhs)) + if rhs := m.StatusUpdates; rhs != nil { + tmpContainer := make([]*CompactionJobStatusUpdate, len(rhs)) for k, v := range rhs { tmpContainer[k] = v.CloneVT() } - r.JobStatusUpdates = tmpContainer + r.StatusUpdates = tmpContainer } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) @@ -59,6 +59,13 @@ func (m *PollCompactionJobsResponse) CloneVT() *PollCompactionJobsResponse { } r.CompactionJobs = tmpContainer } + if rhs := m.Assignments; rhs != nil { + tmpContainer := make([]*CompactionJobAssignment, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Assignments = tmpContainer + } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -70,11 +77,27 @@ func (m *PollCompactionJobsResponse) CloneMessageVT() proto.Message { return m.CloneVT() } -func (m *GetCompactionRequest) CloneVT() *GetCompactionRequest { +func (m *CompactionJob) CloneVT() *CompactionJob { if m == nil { - return (*GetCompactionRequest)(nil) + return (*CompactionJob)(nil) + } + r := new(CompactionJob) + r.Name = m.Name + r.Shard = m.Shard + r.Tenant = m.Tenant + r.CompactionLevel = m.CompactionLevel + if rhs := m.SourceBlocks; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.SourceBlocks = tmpContainer + } + if rhs := m.Tombstones; rhs != nil { + tmpContainer := make([]*Tombstones, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Tombstones = tmpContainer } - r := new(GetCompactionRequest) if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -82,22 +105,16 @@ func (m *GetCompactionRequest) CloneVT() *GetCompactionRequest { return r } 
-func (m *GetCompactionRequest) CloneMessageVT() proto.Message { +func (m *CompactionJob) CloneMessageVT() proto.Message { return m.CloneVT() } -func (m *GetCompactionResponse) CloneVT() *GetCompactionResponse { +func (m *Tombstones) CloneVT() *Tombstones { if m == nil { - return (*GetCompactionResponse)(nil) - } - r := new(GetCompactionResponse) - if rhs := m.CompactionJobs; rhs != nil { - tmpContainer := make([]*CompactionJob, len(rhs)) - for k, v := range rhs { - tmpContainer[k] = v.CloneVT() - } - r.CompactionJobs = tmpContainer + return (*Tombstones)(nil) } + r := new(Tombstones) + r.Blocks = m.Blocks.CloneVT() if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -105,27 +122,22 @@ func (m *GetCompactionResponse) CloneVT() *GetCompactionResponse { return r } -func (m *GetCompactionResponse) CloneMessageVT() proto.Message { +func (m *Tombstones) CloneMessageVT() proto.Message { return m.CloneVT() } -func (m *CompactionJob) CloneVT() *CompactionJob { +func (m *BlockTombstones) CloneVT() *BlockTombstones { if m == nil { - return (*CompactionJob)(nil) + return (*BlockTombstones)(nil) } - r := new(CompactionJob) + r := new(BlockTombstones) r.Name = m.Name - r.Options = m.Options.CloneVT() - r.Status = m.Status.CloneVT() - r.RaftLogIndex = m.RaftLogIndex r.Shard = m.Shard - r.TenantId = m.TenantId + r.Tenant = m.Tenant r.CompactionLevel = m.CompactionLevel if rhs := m.Blocks; rhs != nil { - tmpContainer := make([]*BlockMeta, len(rhs)) - for k, v := range rhs { - tmpContainer[k] = v.CloneVT() - } + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) r.Blocks = tmpContainer } if len(m.unknownFields) > 0 { @@ -135,16 +147,18 @@ func (m *CompactionJob) CloneVT() *CompactionJob { return r } -func (m *CompactionJob) CloneMessageVT() proto.Message { +func (m *BlockTombstones) CloneMessageVT() proto.Message { return m.CloneVT() } -func (m *CompactionOptions) CloneVT() *CompactionOptions { +func (m *CompactionJobAssignment) CloneVT() *CompactionJobAssignment { if m == nil { - return (*CompactionOptions)(nil) + return (*CompactionJobAssignment)(nil) } - r := new(CompactionOptions) - r.StatusUpdateIntervalSeconds = m.StatusUpdateIntervalSeconds + r := new(CompactionJobAssignment) + r.Name = m.Name + r.Token = m.Token + r.LeaseExpiresAt = m.LeaseExpiresAt if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -152,21 +166,19 @@ func (m *CompactionOptions) CloneVT() *CompactionOptions { return r } -func (m *CompactionOptions) CloneMessageVT() proto.Message { +func (m *CompactionJobAssignment) CloneMessageVT() proto.Message { return m.CloneVT() } -func (m *CompactionJobStatus) CloneVT() *CompactionJobStatus { +func (m *CompactionJobStatusUpdate) CloneVT() *CompactionJobStatusUpdate { if m == nil { - return (*CompactionJobStatus)(nil) + return (*CompactionJobStatusUpdate)(nil) } - r := new(CompactionJobStatus) - r.JobName = m.JobName + r := new(CompactionJobStatusUpdate) + r.Name = m.Name + r.Token = m.Token r.Status = m.Status - r.CompletedJob = m.CompletedJob.CloneVT() - r.RaftLogIndex = m.RaftLogIndex - r.Shard = m.Shard - r.TenantId = m.TenantId + r.CompactedBlocks = m.CompactedBlocks.CloneVT() if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -174,21 +186,22 @@ func (m *CompactionJobStatus) CloneVT() *CompactionJobStatus { return r } -func (m *CompactionJobStatus) 
CloneMessageVT() proto.Message { +func (m *CompactionJobStatusUpdate) CloneMessageVT() proto.Message { return m.CloneVT() } -func (m *CompletedJob) CloneVT() *CompletedJob { +func (m *CompactedBlocks) CloneVT() *CompactedBlocks { if m == nil { - return (*CompletedJob)(nil) + return (*CompactedBlocks)(nil) } - r := new(CompletedJob) - if rhs := m.Blocks; rhs != nil { + r := new(CompactedBlocks) + r.SourceBlocks = m.SourceBlocks.CloneVT() + if rhs := m.NewBlocks; rhs != nil { tmpContainer := make([]*BlockMeta, len(rhs)) for k, v := range rhs { tmpContainer[k] = v.CloneVT() } - r.Blocks = tmpContainer + r.NewBlocks = tmpContainer } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) @@ -197,7 +210,7 @@ func (m *CompletedJob) CloneVT() *CompletedJob { return r } -func (m *CompletedJob) CloneMessageVT() proto.Message { +func (m *CompactedBlocks) CloneMessageVT() proto.Message { return m.CloneVT() } @@ -207,17 +220,17 @@ func (this *PollCompactionJobsRequest) EqualVT(that *PollCompactionJobsRequest) } else if this == nil || that == nil { return false } - if len(this.JobStatusUpdates) != len(that.JobStatusUpdates) { + if len(this.StatusUpdates) != len(that.StatusUpdates) { return false } - for i, vx := range this.JobStatusUpdates { - vy := that.JobStatusUpdates[i] + for i, vx := range this.StatusUpdates { + vy := that.StatusUpdates[i] if p, q := vx, vy; p != q { if p == nil { - p = &CompactionJobStatus{} + p = &CompactionJobStatusUpdate{} } if q == nil { - q = &CompactionJobStatus{} + q = &CompactionJobStatusUpdate{} } if !p.EqualVT(q) { return false @@ -260,6 +273,23 @@ func (this *PollCompactionJobsResponse) EqualVT(that *PollCompactionJobsResponse } } } + if len(this.Assignments) != len(that.Assignments) { + return false + } + for i, vx := range this.Assignments { + vy := that.Assignments[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &CompactionJobAssignment{} + } + if q == nil { + q = &CompactionJobAssignment{} + } + if !p.EqualVT(q) { + return false + } + } + } return string(this.unknownFields) == string(that.unknownFields) } @@ -270,39 +300,44 @@ func (this *PollCompactionJobsResponse) EqualMessageVT(thatMsg proto.Message) bo } return this.EqualVT(that) } -func (this *GetCompactionRequest) EqualVT(that *GetCompactionRequest) bool { +func (this *CompactionJob) EqualVT(that *CompactionJob) bool { if this == that { return true } else if this == nil || that == nil { return false } - return string(this.unknownFields) == string(that.unknownFields) -} - -func (this *GetCompactionRequest) EqualMessageVT(thatMsg proto.Message) bool { - that, ok := thatMsg.(*GetCompactionRequest) - if !ok { + if this.Name != that.Name { return false } - return this.EqualVT(that) -} -func (this *GetCompactionResponse) EqualVT(that *GetCompactionResponse) bool { - if this == that { - return true - } else if this == nil || that == nil { + if this.Shard != that.Shard { return false } - if len(this.CompactionJobs) != len(that.CompactionJobs) { + if this.Tenant != that.Tenant { return false } - for i, vx := range this.CompactionJobs { - vy := that.CompactionJobs[i] + if this.CompactionLevel != that.CompactionLevel { + return false + } + if len(this.SourceBlocks) != len(that.SourceBlocks) { + return false + } + for i, vx := range this.SourceBlocks { + vy := that.SourceBlocks[i] + if vx != vy { + return false + } + } + if len(this.Tombstones) != len(that.Tombstones) { + return false + } + for i, vx := range this.Tombstones { + vy := that.Tombstones[i] if p, q := vx, vy; p != q { if p 
== nil { - p = &CompactionJob{} + p = &Tombstones{} } if q == nil { - q = &CompactionJob{} + q = &Tombstones{} } if !p.EqualVT(q) { return false @@ -312,131 +347,136 @@ func (this *GetCompactionResponse) EqualVT(that *GetCompactionResponse) bool { return string(this.unknownFields) == string(that.unknownFields) } -func (this *GetCompactionResponse) EqualMessageVT(thatMsg proto.Message) bool { - that, ok := thatMsg.(*GetCompactionResponse) +func (this *CompactionJob) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*CompactionJob) if !ok { return false } return this.EqualVT(that) } -func (this *CompactionJob) EqualVT(that *CompactionJob) bool { +func (this *Tombstones) EqualVT(that *Tombstones) bool { if this == that { return true } else if this == nil || that == nil { return false } - if this.Name != that.Name { - return false - } - if !this.Options.EqualVT(that.Options) { + if !this.Blocks.EqualVT(that.Blocks) { return false } - if len(this.Blocks) != len(that.Blocks) { + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *Tombstones) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*Tombstones) + if !ok { return false } - for i, vx := range this.Blocks { - vy := that.Blocks[i] - if p, q := vx, vy; p != q { - if p == nil { - p = &BlockMeta{} - } - if q == nil { - q = &BlockMeta{} - } - if !p.EqualVT(q) { - return false - } - } - } - if !this.Status.EqualVT(that.Status) { + return this.EqualVT(that) +} +func (this *BlockTombstones) EqualVT(that *BlockTombstones) bool { + if this == that { + return true + } else if this == nil || that == nil { return false } - if this.RaftLogIndex != that.RaftLogIndex { + if this.Name != that.Name { return false } if this.Shard != that.Shard { return false } - if this.TenantId != that.TenantId { + if this.Tenant != that.Tenant { return false } if this.CompactionLevel != that.CompactionLevel { return false } + if len(this.Blocks) != len(that.Blocks) { + return false + } + for i, vx := range this.Blocks { + vy := that.Blocks[i] + if vx != vy { + return false + } + } return string(this.unknownFields) == string(that.unknownFields) } -func (this *CompactionJob) EqualMessageVT(thatMsg proto.Message) bool { - that, ok := thatMsg.(*CompactionJob) +func (this *BlockTombstones) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*BlockTombstones) if !ok { return false } return this.EqualVT(that) } -func (this *CompactionOptions) EqualVT(that *CompactionOptions) bool { +func (this *CompactionJobAssignment) EqualVT(that *CompactionJobAssignment) bool { if this == that { return true } else if this == nil || that == nil { return false } - if this.StatusUpdateIntervalSeconds != that.StatusUpdateIntervalSeconds { + if this.Name != that.Name { + return false + } + if this.Token != that.Token { + return false + } + if this.LeaseExpiresAt != that.LeaseExpiresAt { return false } return string(this.unknownFields) == string(that.unknownFields) } -func (this *CompactionOptions) EqualMessageVT(thatMsg proto.Message) bool { - that, ok := thatMsg.(*CompactionOptions) +func (this *CompactionJobAssignment) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*CompactionJobAssignment) if !ok { return false } return this.EqualVT(that) } -func (this *CompactionJobStatus) EqualVT(that *CompactionJobStatus) bool { +func (this *CompactionJobStatusUpdate) EqualVT(that *CompactionJobStatusUpdate) bool { if this == that { return true } else if this == nil || that == nil { return false } - if 
this.JobName != that.JobName { - return false - } - if this.Status != that.Status { - return false - } - if !this.CompletedJob.EqualVT(that.CompletedJob) { + if this.Name != that.Name { return false } - if this.RaftLogIndex != that.RaftLogIndex { + if this.Token != that.Token { return false } - if this.Shard != that.Shard { + if this.Status != that.Status { return false } - if this.TenantId != that.TenantId { + if !this.CompactedBlocks.EqualVT(that.CompactedBlocks) { return false } return string(this.unknownFields) == string(that.unknownFields) } -func (this *CompactionJobStatus) EqualMessageVT(thatMsg proto.Message) bool { - that, ok := thatMsg.(*CompactionJobStatus) +func (this *CompactionJobStatusUpdate) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*CompactionJobStatusUpdate) if !ok { return false } return this.EqualVT(that) } -func (this *CompletedJob) EqualVT(that *CompletedJob) bool { +func (this *CompactedBlocks) EqualVT(that *CompactedBlocks) bool { if this == that { return true } else if this == nil || that == nil { return false } - if len(this.Blocks) != len(that.Blocks) { + if !this.SourceBlocks.EqualVT(that.SourceBlocks) { return false } - for i, vx := range this.Blocks { - vy := that.Blocks[i] + if len(this.NewBlocks) != len(that.NewBlocks) { + return false + } + for i, vx := range this.NewBlocks { + vy := that.NewBlocks[i] if p, q := vx, vy; p != q { if p == nil { p = &BlockMeta{} @@ -452,8 +492,8 @@ func (this *CompletedJob) EqualVT(that *CompletedJob) bool { return string(this.unknownFields) == string(that.unknownFields) } -func (this *CompletedJob) EqualMessageVT(thatMsg proto.Message) bool { - that, ok := thatMsg.(*CompletedJob) +func (this *CompactedBlocks) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*CompactedBlocks) if !ok { return false } @@ -588,9 +628,9 @@ func (m *PollCompactionJobsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, er i-- dAtA[i] = 0x10 } - if len(m.JobStatusUpdates) > 0 { - for iNdEx := len(m.JobStatusUpdates) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.JobStatusUpdates[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if len(m.StatusUpdates) > 0 { + for iNdEx := len(m.StatusUpdates) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.StatusUpdates[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -633,6 +673,18 @@ func (m *PollCompactionJobsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, e i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Assignments) > 0 { + for iNdEx := len(m.Assignments) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Assignments[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } if len(m.CompactionJobs) > 0 { for iNdEx := len(m.CompactionJobs) - 1; iNdEx >= 0; iNdEx-- { size, err := m.CompactionJobs[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) @@ -648,7 +700,7 @@ func (m *PollCompactionJobsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, e return len(dAtA) - i, nil } -func (m *GetCompactionRequest) MarshalVT() (dAtA []byte, err error) { +func (m *CompactionJob) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -661,12 +713,12 @@ func (m *GetCompactionRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetCompactionRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *CompactionJob) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return 
m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetCompactionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CompactionJob) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -678,10 +730,55 @@ func (m *GetCompactionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Tombstones) > 0 { + for iNdEx := len(m.Tombstones) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Tombstones[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.SourceBlocks) > 0 { + for iNdEx := len(m.SourceBlocks) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SourceBlocks[iNdEx]) + copy(dAtA[i:], m.SourceBlocks[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SourceBlocks[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.CompactionLevel != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CompactionLevel)) + i-- + dAtA[i] = 0x20 + } + if len(m.Tenant) > 0 { + i -= len(m.Tenant) + copy(dAtA[i:], m.Tenant) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Tenant))) + i-- + dAtA[i] = 0x1a + } + if m.Shard != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Shard)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *GetCompactionResponse) MarshalVT() (dAtA []byte, err error) { +func (m *Tombstones) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -694,12 +791,12 @@ func (m *GetCompactionResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetCompactionResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *Tombstones) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetCompactionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Tombstones) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -711,22 +808,20 @@ func (m *GetCompactionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.CompactionJobs) > 0 { - for iNdEx := len(m.CompactionJobs) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.CompactionJobs[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if m.Blocks != nil { + size, err := m.Blocks.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *CompactionJob) MarshalVT() (dAtA []byte, err error) { +func (m *BlockTombstones) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -739,12 +834,12 @@ func (m *CompactionJob) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *CompactionJob) MarshalToVT(dAtA []byte) (int, error) { +func (m *BlockTombstones) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *CompactionJob) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *BlockTombstones) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ 
-756,59 +851,31 @@ func (m *CompactionJob) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Blocks) > 0 { + for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Blocks[iNdEx]) + copy(dAtA[i:], m.Blocks[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Blocks[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } if m.CompactionLevel != 0 { i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CompactionLevel)) i-- - dAtA[i] = 0x40 + dAtA[i] = 0x20 } - if len(m.TenantId) > 0 { - i -= len(m.TenantId) - copy(dAtA[i:], m.TenantId) - i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TenantId))) + if len(m.Tenant) > 0 { + i -= len(m.Tenant) + copy(dAtA[i:], m.Tenant) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Tenant))) i-- - dAtA[i] = 0x3a + dAtA[i] = 0x1a } if m.Shard != 0 { i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Shard)) i-- - dAtA[i] = 0x30 - } - if m.RaftLogIndex != 0 { - i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RaftLogIndex)) - i-- - dAtA[i] = 0x28 - } - if m.Status != nil { - size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x22 - } - if len(m.Blocks) > 0 { - for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Blocks[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a - } - } - if m.Options != nil { - size, err := m.Options.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 + dAtA[i] = 0x10 } if len(m.Name) > 0 { i -= len(m.Name) @@ -820,7 +887,7 @@ func (m *CompactionJob) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *CompactionOptions) MarshalVT() (dAtA []byte, err error) { +func (m *CompactionJobAssignment) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -833,12 +900,12 @@ func (m *CompactionOptions) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *CompactionOptions) MarshalToVT(dAtA []byte) (int, error) { +func (m *CompactionJobAssignment) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *CompactionOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CompactionJobAssignment) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -850,15 +917,27 @@ func (m *CompactionOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.StatusUpdateIntervalSeconds != 0 { - i = protohelpers.EncodeVarint(dAtA, i, uint64(m.StatusUpdateIntervalSeconds)) + if m.LeaseExpiresAt != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.LeaseExpiresAt)) + i-- + dAtA[i] = 0x18 + } + if m.Token != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Token)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *CompactionJobStatus) MarshalVT() (dAtA []byte, err error) { +func (m *CompactionJobStatusUpdate) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -871,12 +950,12 @@ 
func (m *CompactionJobStatus) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *CompactionJobStatus) MarshalToVT(dAtA []byte) (int, error) { +func (m *CompactionJobStatusUpdate) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *CompactionJobStatus) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CompactionJobStatusUpdate) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -888,49 +967,37 @@ func (m *CompactionJobStatus) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.TenantId) > 0 { - i -= len(m.TenantId) - copy(dAtA[i:], m.TenantId) - i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TenantId))) - i-- - dAtA[i] = 0x32 - } - if m.Shard != 0 { - i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Shard)) - i-- - dAtA[i] = 0x28 - } - if m.RaftLogIndex != 0 { - i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RaftLogIndex)) - i-- - dAtA[i] = 0x20 - } - if m.CompletedJob != nil { - size, err := m.CompletedJob.MarshalToSizedBufferVT(dAtA[:i]) + if m.CompactedBlocks != nil { + size, err := m.CompactedBlocks.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x22 } if m.Status != 0 { i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Status)) i-- + dAtA[i] = 0x18 + } + if m.Token != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Token)) + i-- dAtA[i] = 0x10 } - if len(m.JobName) > 0 { - i -= len(m.JobName) - copy(dAtA[i:], m.JobName) - i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.JobName))) + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *CompletedJob) MarshalVT() (dAtA []byte, err error) { +func (m *CompactedBlocks) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -943,12 +1010,12 @@ func (m *CompletedJob) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *CompletedJob) MarshalToVT(dAtA []byte) (int, error) { +func (m *CompactedBlocks) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *CompletedJob) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CompactedBlocks) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -960,17 +1027,27 @@ func (m *CompletedJob) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Blocks) > 0 { - for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Blocks[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if len(m.NewBlocks) > 0 { + for iNdEx := len(m.NewBlocks) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.NewBlocks[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x12 + } + } + if m.SourceBlocks != nil { + size, err := m.SourceBlocks.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } @@ -981,8 +1058,8 @@ func (m *PollCompactionJobsRequest) SizeVT() (n int) { } var l int _ = l - if len(m.JobStatusUpdates) > 0 { - for _, e := range 
m.JobStatusUpdates { + if len(m.StatusUpdates) > 0 { + for _, e := range m.StatusUpdates { l = e.SizeVT() n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } @@ -1006,37 +1083,67 @@ func (m *PollCompactionJobsResponse) SizeVT() (n int) { n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } } + if len(m.Assignments) > 0 { + for _, e := range m.Assignments { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } n += len(m.unknownFields) return n } -func (m *GetCompactionRequest) SizeVT() (n int) { +func (m *CompactionJob) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Shard != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Shard)) + } + l = len(m.Tenant) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CompactionLevel != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.CompactionLevel)) + } + if len(m.SourceBlocks) > 0 { + for _, s := range m.SourceBlocks { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Tombstones) > 0 { + for _, e := range m.Tombstones { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } n += len(m.unknownFields) return n } -func (m *GetCompactionResponse) SizeVT() (n int) { +func (m *Tombstones) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.CompactionJobs) > 0 { - for _, e := range m.CompactionJobs { - l = e.SizeVT() - n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) - } + if m.Blocks != nil { + l = m.Blocks.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } n += len(m.unknownFields) return n } -func (m *CompactionJob) SizeVT() (n int) { +func (m *BlockTombstones) SizeVT() (n int) { if m == nil { return 0 } @@ -1046,89 +1153,82 @@ func (m *CompactionJob) SizeVT() (n int) { if l > 0 { n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } - if m.Options != nil { - l = m.Options.SizeVT() - n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) - } - if len(m.Blocks) > 0 { - for _, e := range m.Blocks { - l = e.SizeVT() - n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) - } - } - if m.Status != nil { - l = m.Status.SizeVT() - n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) - } - if m.RaftLogIndex != 0 { - n += 1 + protohelpers.SizeOfVarint(uint64(m.RaftLogIndex)) - } if m.Shard != 0 { n += 1 + protohelpers.SizeOfVarint(uint64(m.Shard)) } - l = len(m.TenantId) + l = len(m.Tenant) if l > 0 { n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } if m.CompactionLevel != 0 { n += 1 + protohelpers.SizeOfVarint(uint64(m.CompactionLevel)) } + if len(m.Blocks) > 0 { + for _, s := range m.Blocks { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } n += len(m.unknownFields) return n } -func (m *CompactionOptions) SizeVT() (n int) { +func (m *CompactionJobAssignment) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l - if m.StatusUpdateIntervalSeconds != 0 { - n += 1 + protohelpers.SizeOfVarint(uint64(m.StatusUpdateIntervalSeconds)) + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Token != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Token)) + } + if m.LeaseExpiresAt != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.LeaseExpiresAt)) } n += len(m.unknownFields) return n } -func (m *CompactionJobStatus) SizeVT() (n int) { +func (m *CompactionJobStatusUpdate) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.JobName) + l = len(m.Name) if 
l > 0 { n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } + if m.Token != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Token)) + } if m.Status != 0 { n += 1 + protohelpers.SizeOfVarint(uint64(m.Status)) } - if m.CompletedJob != nil { - l = m.CompletedJob.SizeVT() - n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) - } - if m.RaftLogIndex != 0 { - n += 1 + protohelpers.SizeOfVarint(uint64(m.RaftLogIndex)) - } - if m.Shard != 0 { - n += 1 + protohelpers.SizeOfVarint(uint64(m.Shard)) - } - l = len(m.TenantId) - if l > 0 { + if m.CompactedBlocks != nil { + l = m.CompactedBlocks.SizeVT() n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } n += len(m.unknownFields) return n } -func (m *CompletedJob) SizeVT() (n int) { +func (m *CompactedBlocks) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Blocks) > 0 { - for _, e := range m.Blocks { + if m.SourceBlocks != nil { + l = m.SourceBlocks.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if len(m.NewBlocks) > 0 { + for _, e := range m.NewBlocks { l = e.SizeVT() n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } @@ -1168,7 +1268,7 @@ func (m *PollCompactionJobsRequest) UnmarshalVT(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JobStatusUpdates", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StatusUpdates", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1195,8 +1295,8 @@ func (m *PollCompactionJobsRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.JobStatusUpdates = append(m.JobStatusUpdates, &CompactionJobStatus{}) - if err := m.JobStatusUpdates[len(m.JobStatusUpdates)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.StatusUpdates = append(m.StatusUpdates, &CompactionJobStatusUpdate{}) + if err := m.StatusUpdates[len(m.StatusUpdates)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1304,111 +1404,9 @@ func (m *PollCompactionJobsResponse) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := protohelpers.Skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return protohelpers.ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetCompactionRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetCompactionRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetCompactionRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := protohelpers.Skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return protohelpers.ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetCompactionResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetCompactionResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetCompactionResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CompactionJobs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Assignments", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1435,8 +1433,8 @@ func (m *GetCompactionResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.CompactionJobs = append(m.CompactionJobs, &CompactionJob{}) - if err := m.CompactionJobs[len(m.CompactionJobs)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Assignments = append(m.Assignments, &CompactionJobAssignment{}) + if err := m.Assignments[len(m.Assignments)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1524,10 +1522,10 @@ func (m *CompactionJob) UnmarshalVT(dAtA []byte) error { m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } - var msglen int + m.Shard = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return protohelpers.ErrIntOverflow @@ -1537,33 +1535,16 @@ func (m *CompactionJob) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Shard |= uint32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return protohelpers.ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return protohelpers.ErrInvalidLength - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - if m.Options == nil { - m.Options = &CompactionOptions{} - } - if err := m.Options.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tenant", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return protohelpers.ErrIntOverflow @@ -1573,31 +1554,29 @@ func (m *CompactionJob) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return protohelpers.ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return protohelpers.ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Blocks = append(m.Blocks, &BlockMeta{}) - if err := m.Blocks[len(m.Blocks)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Tenant = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactionLevel", wireType) } - var msglen int + m.CompactionLevel = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return protohelpers.ErrIntOverflow @@ -1607,33 +1586,16 @@ func (m *CompactionJob) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.CompactionLevel |= uint32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return protohelpers.ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return protohelpers.ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Status == nil { - m.Status = &CompactionJobStatus{} - } - if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftLogIndex", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceBlocks", wireType) } - m.RaftLogIndex = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return protohelpers.ErrIntOverflow @@ -1643,16 +1605,29 @@ func (m *CompactionJob) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.RaftLogIndex |= uint64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceBlocks = append(m.SourceBlocks, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tombstones", wireType) } - m.Shard = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return protohelpers.ErrIntOverflow @@ -1662,16 +1637,82 @@ func (m *CompactionJob) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Shard |= uint32(b&0x7F) << shift + msglen |= int(b&0x7F) 
<< shift if b < 0x80 { break } } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TenantId", wireType) + if msglen < 0 { + return protohelpers.ErrInvalidLength } - var stringLen uint64 + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tombstones = append(m.Tombstones, &Tombstones{}) + if err := m.Tombstones[len(m.Tombstones)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Tombstones) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Tombstones: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Tombstones: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return protohelpers.ErrIntOverflow @@ -1681,43 +1722,28 @@ func (m *CompactionJob) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return protohelpers.ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return protohelpers.ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.TenantId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CompactionLevel", wireType) + if m.Blocks == nil { + m.Blocks = &BlockTombstones{} } - m.CompactionLevel = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CompactionLevel |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.Blocks.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -1740,7 +1766,7 @@ func (m *CompactionJob) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *CompactionOptions) UnmarshalVT(dAtA []byte) error { +func (m *BlockTombstones) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1763,17 +1789,119 @@ func (m *CompactionOptions) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CompactionOptions: wiretype end group 
for non-group") + return fmt.Errorf("proto: BlockTombstones: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CompactionOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: BlockTombstones: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StatusUpdateIntervalSeconds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + m.Shard = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Shard |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tenant", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tenant = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactionLevel", wireType) } - m.StatusUpdateIntervalSeconds = 0 + m.CompactionLevel = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompactionLevel |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return protohelpers.ErrIntOverflow @@ -1783,11 +1911,24 @@ func (m *CompactionOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.StatusUpdateIntervalSeconds |= uint64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Blocks = append(m.Blocks, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -1810,7 +1951,7 @@ func (m *CompactionOptions) 
UnmarshalVT(dAtA []byte) error { } return nil } -func (m *CompactionJobStatus) UnmarshalVT(dAtA []byte) error { +func (m *CompactionJobAssignment) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1833,15 +1974,15 @@ func (m *CompactionJobStatus) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CompactionJobStatus: wiretype end group for non-group") + return fmt.Errorf("proto: CompactionJobAssignment: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CompactionJobStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CompactionJobAssignment: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JobName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1869,13 +2010,13 @@ func (m *CompactionJobStatus) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.JobName = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) } - m.Status = 0 + m.Token = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return protohelpers.ErrIntOverflow @@ -1885,16 +2026,86 @@ func (m *CompactionJobStatus) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Status |= CompactionStatus(b&0x7F) << shift + m.Token |= uint64(b&0x7F) << shift if b < 0x80 { break } } case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseExpiresAt", wireType) + } + m.LeaseExpiresAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LeaseExpiresAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompactionJobStatusUpdate) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactionJobStatusUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactionJobStatusUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CompletedJob", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return protohelpers.ErrIntOverflow @@ -1904,33 +2115,29 @@ func (m *CompactionJobStatus) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return protohelpers.ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return protohelpers.ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.CompletedJob == nil { - m.CompletedJob = &CompletedJob{} - } - if err := m.CompletedJob.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftLogIndex", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) } - m.RaftLogIndex = 0 + m.Token = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return protohelpers.ErrIntOverflow @@ -1940,16 +2147,16 @@ func (m *CompactionJobStatus) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.RaftLogIndex |= uint64(b&0x7F) << shift + m.Token |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 5: + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - m.Shard = 0 + m.Status = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return protohelpers.ErrIntOverflow @@ -1959,16 +2166,16 @@ func (m *CompactionJobStatus) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Shard |= uint32(b&0x7F) << shift + m.Status |= CompactionJobStatus(b&0x7F) << shift if b < 0x80 { break } } - case 6: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TenantId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CompactedBlocks", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return protohelpers.ErrIntOverflow @@ -1978,23 +2185,27 @@ func (m *CompactionJobStatus) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { 
return protohelpers.ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return protohelpers.ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.TenantId = string(dAtA[iNdEx:postIndex]) + if m.CompactedBlocks == nil { + m.CompactedBlocks = &CompactedBlocks{} + } + if err := m.CompactedBlocks.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -2018,7 +2229,7 @@ func (m *CompactionJobStatus) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *CompletedJob) UnmarshalVT(dAtA []byte) error { +func (m *CompactedBlocks) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2041,15 +2252,51 @@ func (m *CompletedJob) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CompletedJob: wiretype end group for non-group") + return fmt.Errorf("proto: CompactedBlocks: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CompletedJob: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CompactedBlocks: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SourceBlocks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceBlocks == nil { + m.SourceBlocks = &BlockList{} + } + if err := m.SourceBlocks.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewBlocks", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2076,8 +2323,8 @@ func (m *CompletedJob) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Blocks = append(m.Blocks, &BlockMeta{}) - if err := m.Blocks[len(m.Blocks)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.NewBlocks = append(m.NewBlocks, &BlockMeta{}) + if err := m.NewBlocks[len(m.NewBlocks)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex diff --git a/api/gen/proto/go/metastore/v1/index.pb.go b/api/gen/proto/go/metastore/v1/index.pb.go index c1f8e87e15..25f5085011 100644 --- a/api/gen/proto/go/metastore/v1/index.pb.go +++ b/api/gen/proto/go/metastore/v1/index.pb.go @@ -105,6 +105,100 @@ func (*AddBlockResponse) Descriptor() ([]byte, []int) { return file_metastore_v1_index_proto_rawDescGZIP(), []int{1} } +type GetBlockMetadataRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Blocks *BlockList `protobuf:"bytes,1,opt,name=blocks,proto3" json:"blocks,omitempty"` +} + +func (x *GetBlockMetadataRequest) Reset() { + *x = GetBlockMetadataRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_index_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) + } +} + +func (x *GetBlockMetadataRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBlockMetadataRequest) ProtoMessage() {} + +func (x *GetBlockMetadataRequest) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_index_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBlockMetadataRequest.ProtoReflect.Descriptor instead. +func (*GetBlockMetadataRequest) Descriptor() ([]byte, []int) { + return file_metastore_v1_index_proto_rawDescGZIP(), []int{2} +} + +func (x *GetBlockMetadataRequest) GetBlocks() *BlockList { + if x != nil { + return x.Blocks + } + return nil +} + +type GetBlockMetadataResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Blocks []*BlockMeta `protobuf:"bytes,1,rep,name=blocks,proto3" json:"blocks,omitempty"` +} + +func (x *GetBlockMetadataResponse) Reset() { + *x = GetBlockMetadataResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_index_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetBlockMetadataResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBlockMetadataResponse) ProtoMessage() {} + +func (x *GetBlockMetadataResponse) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_index_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBlockMetadataResponse.ProtoReflect.Descriptor instead. 
+func (*GetBlockMetadataResponse) Descriptor() ([]byte, []int) { + return file_metastore_v1_index_proto_rawDescGZIP(), []int{3} +} + +func (x *GetBlockMetadataResponse) GetBlocks() []*BlockMeta { + if x != nil { + return x.Blocks + } + return nil +} + var File_metastore_v1_index_proto protoreflect.FileDescriptor var file_metastore_v1_index_proto_rawDesc = []byte{ @@ -117,25 +211,41 @@ var file_metastore_v1_index_proto_rawDesc = []byte{ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x12, 0x0a, 0x10, 0x41, 0x64, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x5b, 0x0a, 0x0c, 0x49, 0x6e, 0x64, 0x65, - 0x78, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4b, 0x0a, 0x08, 0x41, 0x64, 0x64, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x41, 0x64, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0xb7, 0x01, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x65, - 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x49, 0x6e, 0x64, 0x65, - 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x61, 0x66, 0x61, 0x6e, 0x61, 0x2f, 0x70, 0x79, 0x72, - 0x6f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, - 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x76, 0x31, - 0xa2, 0x02, 0x03, 0x4d, 0x58, 0x58, 0xaa, 0x02, 0x0c, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, - 0x72, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x0c, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, - 0x65, 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x18, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, - 0x02, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4a, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x06, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x73, 0x22, 0x4b, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x2f, 0x0a, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x73, 0x32, 0xc0, 0x01, 0x0a, 0x0c, 
0x49, 0x6e, 0x64, 0x65, 0x78, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x4b, 0x0a, 0x08, 0x41, 0x64, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1d, + 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, + 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, + 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, 0x64, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x63, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x25, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x6d, 0x65, 0x74, + 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x42, 0xb7, 0x01, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x65, 0x74, + 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x61, 0x66, 0x61, 0x6e, 0x61, 0x2f, 0x70, 0x79, 0x72, 0x6f, + 0x73, 0x63, 0x6f, 0x70, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x2f, 0x76, 0x31, 0x3b, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x76, 0x31, 0xa2, + 0x02, 0x03, 0x4d, 0x58, 0x58, 0xaa, 0x02, 0x0c, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x0c, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x5c, 0x56, 0x31, 0xe2, 0x02, 0x18, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5c, + 0x56, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, + 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -150,21 +260,28 @@ func file_metastore_v1_index_proto_rawDescGZIP() []byte { return file_metastore_v1_index_proto_rawDescData } -var file_metastore_v1_index_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_metastore_v1_index_proto_msgTypes = make([]protoimpl.MessageInfo, 4) var file_metastore_v1_index_proto_goTypes = []any{ - (*AddBlockRequest)(nil), // 0: metastore.v1.AddBlockRequest - (*AddBlockResponse)(nil), // 1: metastore.v1.AddBlockResponse - (*BlockMeta)(nil), // 2: metastore.v1.BlockMeta + (*AddBlockRequest)(nil), // 0: metastore.v1.AddBlockRequest + (*AddBlockResponse)(nil), // 1: metastore.v1.AddBlockResponse + (*GetBlockMetadataRequest)(nil), // 2: metastore.v1.GetBlockMetadataRequest + (*GetBlockMetadataResponse)(nil), // 3: metastore.v1.GetBlockMetadataResponse + (*BlockMeta)(nil), // 4: metastore.v1.BlockMeta + (*BlockList)(nil), // 5: metastore.v1.BlockList } var file_metastore_v1_index_proto_depIdxs = []int32{ - 2, // 0: metastore.v1.AddBlockRequest.block:type_name -> metastore.v1.BlockMeta - 0, // 1: metastore.v1.IndexService.AddBlock:input_type -> metastore.v1.AddBlockRequest - 1, // 2: metastore.v1.IndexService.AddBlock:output_type -> 
metastore.v1.AddBlockResponse - 2, // [2:3] is the sub-list for method output_type - 1, // [1:2] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 4, // 0: metastore.v1.AddBlockRequest.block:type_name -> metastore.v1.BlockMeta + 5, // 1: metastore.v1.GetBlockMetadataRequest.blocks:type_name -> metastore.v1.BlockList + 4, // 2: metastore.v1.GetBlockMetadataResponse.blocks:type_name -> metastore.v1.BlockMeta + 0, // 3: metastore.v1.IndexService.AddBlock:input_type -> metastore.v1.AddBlockRequest + 2, // 4: metastore.v1.IndexService.GetBlockMetadata:input_type -> metastore.v1.GetBlockMetadataRequest + 1, // 5: metastore.v1.IndexService.AddBlock:output_type -> metastore.v1.AddBlockResponse + 3, // 6: metastore.v1.IndexService.GetBlockMetadata:output_type -> metastore.v1.GetBlockMetadataResponse + 5, // [5:7] is the sub-list for method output_type + 3, // [3:5] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name } func init() { file_metastore_v1_index_proto_init() } @@ -198,6 +315,30 @@ func file_metastore_v1_index_proto_init() { return nil } } + file_metastore_v1_index_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*GetBlockMetadataRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metastore_v1_index_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*GetBlockMetadataResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -205,7 +346,7 @@ func file_metastore_v1_index_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_metastore_v1_index_proto_rawDesc, NumEnums: 0, - NumMessages: 2, + NumMessages: 4, NumExtensions: 0, NumServices: 1, }, diff --git a/api/gen/proto/go/metastore/v1/index_vtproto.pb.go b/api/gen/proto/go/metastore/v1/index_vtproto.pb.go index 093b787b47..bb513b4f13 100644 --- a/api/gen/proto/go/metastore/v1/index_vtproto.pb.go +++ b/api/gen/proto/go/metastore/v1/index_vtproto.pb.go @@ -56,6 +56,46 @@ func (m *AddBlockResponse) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *GetBlockMetadataRequest) CloneVT() *GetBlockMetadataRequest { + if m == nil { + return (*GetBlockMetadataRequest)(nil) + } + r := new(GetBlockMetadataRequest) + r.Blocks = m.Blocks.CloneVT() + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetBlockMetadataRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetBlockMetadataResponse) CloneVT() *GetBlockMetadataResponse { + if m == nil { + return (*GetBlockMetadataResponse)(nil) + } + r := new(GetBlockMetadataResponse) + if rhs := m.Blocks; rhs != nil { + tmpContainer := make([]*BlockMeta, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Blocks = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetBlockMetadataResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + 
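// ---------------------------------------------------------------------------
// Illustrative sketch (editorial note, not part of the generated code): how a
// caller might exercise the new IndexService.GetBlockMetadata RPC through the
// generated Connect client. The metastore URL, the prebuilt *BlockList, and
// the error handling are assumptions for illustration only; the constructor
// and request/response types are the ones generated in this patch.
//
//	client := metastorev1connect.NewIndexServiceClient(http.DefaultClient, metastoreURL)
//	req := connect.NewRequest(&metastorev1.GetBlockMetadataRequest{
//		Blocks: blockList, // *metastorev1.BlockList identifying the blocks to look up
//	})
//	resp, err := client.GetBlockMetadata(ctx, req)
//	if err != nil {
//		return nil, err
//	}
//	// Presumably only blocks still known to the index come back, so the
//	// caller can diff resp.Msg.GetBlocks() against the requested list.
//	metas := resp.Msg.GetBlocks()
// ---------------------------------------------------------------------------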
func (this *AddBlockRequest) EqualVT(that *AddBlockRequest) bool { if this == that { return true @@ -91,6 +131,58 @@ func (this *AddBlockResponse) EqualMessageVT(thatMsg proto.Message) bool { } return this.EqualVT(that) } +func (this *GetBlockMetadataRequest) EqualVT(that *GetBlockMetadataRequest) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if !this.Blocks.EqualVT(that.Blocks) { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *GetBlockMetadataRequest) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*GetBlockMetadataRequest) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *GetBlockMetadataResponse) EqualVT(that *GetBlockMetadataResponse) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if len(this.Blocks) != len(that.Blocks) { + return false + } + for i, vx := range this.Blocks { + vy := that.Blocks[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &BlockMeta{} + } + if q == nil { + q = &BlockMeta{} + } + if !p.EqualVT(q) { + return false + } + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *GetBlockMetadataResponse) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*GetBlockMetadataResponse) + if !ok { + return false + } + return this.EqualVT(that) +} // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. @@ -102,6 +194,7 @@ const _ = grpc.SupportPackageIsVersion7 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type IndexServiceClient interface { AddBlock(ctx context.Context, in *AddBlockRequest, opts ...grpc.CallOption) (*AddBlockResponse, error) + GetBlockMetadata(ctx context.Context, in *GetBlockMetadataRequest, opts ...grpc.CallOption) (*GetBlockMetadataResponse, error) } type indexServiceClient struct { @@ -121,11 +214,21 @@ func (c *indexServiceClient) AddBlock(ctx context.Context, in *AddBlockRequest, return out, nil } +func (c *indexServiceClient) GetBlockMetadata(ctx context.Context, in *GetBlockMetadataRequest, opts ...grpc.CallOption) (*GetBlockMetadataResponse, error) { + out := new(GetBlockMetadataResponse) + err := c.cc.Invoke(ctx, "/metastore.v1.IndexService/GetBlockMetadata", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // IndexServiceServer is the server API for IndexService service. 
// All implementations must embed UnimplementedIndexServiceServer // for forward compatibility type IndexServiceServer interface { AddBlock(context.Context, *AddBlockRequest) (*AddBlockResponse, error) + GetBlockMetadata(context.Context, *GetBlockMetadataRequest) (*GetBlockMetadataResponse, error) mustEmbedUnimplementedIndexServiceServer() } @@ -136,6 +239,9 @@ type UnimplementedIndexServiceServer struct { func (UnimplementedIndexServiceServer) AddBlock(context.Context, *AddBlockRequest) (*AddBlockResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method AddBlock not implemented") } +func (UnimplementedIndexServiceServer) GetBlockMetadata(context.Context, *GetBlockMetadataRequest) (*GetBlockMetadataResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetBlockMetadata not implemented") +} func (UnimplementedIndexServiceServer) mustEmbedUnimplementedIndexServiceServer() {} // UnsafeIndexServiceServer may be embedded to opt out of forward compatibility for this service. @@ -167,6 +273,24 @@ func _IndexService_AddBlock_Handler(srv interface{}, ctx context.Context, dec fu return interceptor(ctx, in, info, handler) } +func _IndexService_GetBlockMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetBlockMetadataRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IndexServiceServer).GetBlockMetadata(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/metastore.v1.IndexService/GetBlockMetadata", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IndexServiceServer).GetBlockMetadata(ctx, req.(*GetBlockMetadataRequest)) + } + return interceptor(ctx, in, info, handler) +} + // IndexService_ServiceDesc is the grpc.ServiceDesc for IndexService service. 
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -178,6 +302,10 @@ var IndexService_ServiceDesc = grpc.ServiceDesc{ MethodName: "AddBlock", Handler: _IndexService_AddBlock_Handler, }, + { + MethodName: "GetBlockMetadata", + Handler: _IndexService_GetBlockMetadata_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "metastore/v1/index.proto", @@ -259,6 +387,94 @@ func (m *AddBlockResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *GetBlockMetadataRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetBlockMetadataRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetBlockMetadataRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Blocks != nil { + size, err := m.Blocks.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetBlockMetadataResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetBlockMetadataResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetBlockMetadataResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Blocks) > 0 { + for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Blocks[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *AddBlockRequest) SizeVT() (n int) { if m == nil { return 0 @@ -283,6 +499,36 @@ func (m *AddBlockResponse) SizeVT() (n int) { return n } +func (m *GetBlockMetadataRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Blocks != nil { + l = m.Blocks.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetBlockMetadataResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Blocks) > 0 { + for _, e := range m.Blocks { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + func (m *AddBlockRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -421,3 +667,175 @@ func (m *AddBlockResponse) UnmarshalVT(dAtA []byte) error { } return nil } +func (m *GetBlockMetadataRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetBlockMetadataRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetBlockMetadataRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Blocks == nil { + m.Blocks = &BlockList{} + } + if err := m.Blocks.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetBlockMetadataResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetBlockMetadataResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetBlockMetadataResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Blocks = append(m.Blocks, &BlockMeta{}) + if err := m.Blocks[len(m.Blocks)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/api/gen/proto/go/metastore/v1/metastorev1connect/index.connect.go b/api/gen/proto/go/metastore/v1/metastorev1connect/index.connect.go index a27c3caf93..404d83a644 100644 --- a/api/gen/proto/go/metastore/v1/metastorev1connect/index.connect.go +++ b/api/gen/proto/go/metastore/v1/metastorev1connect/index.connect.go @@ -35,17 +35,22 @@ const ( const ( // IndexServiceAddBlockProcedure is the fully-qualified name of the IndexService's AddBlock RPC. IndexServiceAddBlockProcedure = "/metastore.v1.IndexService/AddBlock" + // IndexServiceGetBlockMetadataProcedure is the fully-qualified name of the IndexService's + // GetBlockMetadata RPC. + IndexServiceGetBlockMetadataProcedure = "/metastore.v1.IndexService/GetBlockMetadata" ) // These variables are the protoreflect.Descriptor objects for the RPCs defined in this package. var ( - indexServiceServiceDescriptor = v1.File_metastore_v1_index_proto.Services().ByName("IndexService") - indexServiceAddBlockMethodDescriptor = indexServiceServiceDescriptor.Methods().ByName("AddBlock") + indexServiceServiceDescriptor = v1.File_metastore_v1_index_proto.Services().ByName("IndexService") + indexServiceAddBlockMethodDescriptor = indexServiceServiceDescriptor.Methods().ByName("AddBlock") + indexServiceGetBlockMetadataMethodDescriptor = indexServiceServiceDescriptor.Methods().ByName("GetBlockMetadata") ) // IndexServiceClient is a client for the metastore.v1.IndexService service. type IndexServiceClient interface { AddBlock(context.Context, *connect.Request[v1.AddBlockRequest]) (*connect.Response[v1.AddBlockResponse], error) + GetBlockMetadata(context.Context, *connect.Request[v1.GetBlockMetadataRequest]) (*connect.Response[v1.GetBlockMetadataResponse], error) } // NewIndexServiceClient constructs a client for the metastore.v1.IndexService service. By default, @@ -64,12 +69,19 @@ func NewIndexServiceClient(httpClient connect.HTTPClient, baseURL string, opts . connect.WithSchema(indexServiceAddBlockMethodDescriptor), connect.WithClientOptions(opts...), ), + getBlockMetadata: connect.NewClient[v1.GetBlockMetadataRequest, v1.GetBlockMetadataResponse]( + httpClient, + baseURL+IndexServiceGetBlockMetadataProcedure, + connect.WithSchema(indexServiceGetBlockMetadataMethodDescriptor), + connect.WithClientOptions(opts...), + ), } } // indexServiceClient implements IndexServiceClient. type indexServiceClient struct { - addBlock *connect.Client[v1.AddBlockRequest, v1.AddBlockResponse] + addBlock *connect.Client[v1.AddBlockRequest, v1.AddBlockResponse] + getBlockMetadata *connect.Client[v1.GetBlockMetadataRequest, v1.GetBlockMetadataResponse] } // AddBlock calls metastore.v1.IndexService.AddBlock. @@ -77,9 +89,15 @@ func (c *indexServiceClient) AddBlock(ctx context.Context, req *connect.Request[ return c.addBlock.CallUnary(ctx, req) } +// GetBlockMetadata calls metastore.v1.IndexService.GetBlockMetadata. +func (c *indexServiceClient) GetBlockMetadata(ctx context.Context, req *connect.Request[v1.GetBlockMetadataRequest]) (*connect.Response[v1.GetBlockMetadataResponse], error) { + return c.getBlockMetadata.CallUnary(ctx, req) +} + // IndexServiceHandler is an implementation of the metastore.v1.IndexService service. 
type IndexServiceHandler interface { AddBlock(context.Context, *connect.Request[v1.AddBlockRequest]) (*connect.Response[v1.AddBlockResponse], error) + GetBlockMetadata(context.Context, *connect.Request[v1.GetBlockMetadataRequest]) (*connect.Response[v1.GetBlockMetadataResponse], error) } // NewIndexServiceHandler builds an HTTP handler from the service implementation. It returns the @@ -94,10 +112,18 @@ func NewIndexServiceHandler(svc IndexServiceHandler, opts ...connect.HandlerOpti connect.WithSchema(indexServiceAddBlockMethodDescriptor), connect.WithHandlerOptions(opts...), ) + indexServiceGetBlockMetadataHandler := connect.NewUnaryHandler( + IndexServiceGetBlockMetadataProcedure, + svc.GetBlockMetadata, + connect.WithSchema(indexServiceGetBlockMetadataMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) return "/metastore.v1.IndexService/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case IndexServiceAddBlockProcedure: indexServiceAddBlockHandler.ServeHTTP(w, r) + case IndexServiceGetBlockMetadataProcedure: + indexServiceGetBlockMetadataHandler.ServeHTTP(w, r) default: http.NotFound(w, r) } @@ -110,3 +136,7 @@ type UnimplementedIndexServiceHandler struct{} func (UnimplementedIndexServiceHandler) AddBlock(context.Context, *connect.Request[v1.AddBlockRequest]) (*connect.Response[v1.AddBlockResponse], error) { return nil, connect.NewError(connect.CodeUnimplemented, errors.New("metastore.v1.IndexService.AddBlock is not implemented")) } + +func (UnimplementedIndexServiceHandler) GetBlockMetadata(context.Context, *connect.Request[v1.GetBlockMetadataRequest]) (*connect.Response[v1.GetBlockMetadataResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("metastore.v1.IndexService.GetBlockMetadata is not implemented")) +} diff --git a/api/gen/proto/go/metastore/v1/metastorev1connect/index.connect.mux.go b/api/gen/proto/go/metastore/v1/metastorev1connect/index.connect.mux.go index 6ddd38c790..d8274fcda9 100644 --- a/api/gen/proto/go/metastore/v1/metastorev1connect/index.connect.mux.go +++ b/api/gen/proto/go/metastore/v1/metastorev1connect/index.connect.mux.go @@ -24,4 +24,9 @@ func RegisterIndexServiceHandler(mux *mux.Router, svc IndexServiceHandler, opts svc.AddBlock, opts..., )) + mux.Handle("/metastore.v1.IndexService/GetBlockMetadata", connect.NewUnaryHandler( + "/metastore.v1.IndexService/GetBlockMetadata", + svc.GetBlockMetadata, + opts..., + )) } diff --git a/api/gen/proto/go/metastore/v1/raft_log/raft_log.pb.go b/api/gen/proto/go/metastore/v1/raft_log/raft_log.pb.go index d2b8859794..d98d173413 100644 --- a/api/gen/proto/go/metastore/v1/raft_log/raft_log.pb.go +++ b/api/gen/proto/go/metastore/v1/raft_log/raft_log.pb.go @@ -7,6 +7,7 @@ package raft_log import ( + v1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -23,25 +24,25 @@ const ( type RaftCommand int32 const ( - RaftCommand_RAFT_COMMAND_UNKNOWN RaftCommand = 0 - RaftCommand_RAFT_COMMAND_ADD_BLOCK RaftCommand = 1 - RaftCommand_RAFT_COMMAND_POLL_COMPACTION_JOBS RaftCommand = 2 - RaftCommand_RAFT_COMMAND_CLEAN_BLOCKS RaftCommand = 3 + RaftCommand_RAFT_COMMAND_UNKNOWN RaftCommand = 0 + RaftCommand_RAFT_COMMAND_ADD_BLOCK_METADATA RaftCommand = 1 + RaftCommand_RAFT_COMMAND_GET_COMPACTION_PLAN_UPDATE RaftCommand = 2 + RaftCommand_RAFT_COMMAND_UPDATE_COMPACTION_PLAN RaftCommand = 3 ) // Enum value maps for 
RaftCommand. var ( RaftCommand_name = map[int32]string{ 0: "RAFT_COMMAND_UNKNOWN", - 1: "RAFT_COMMAND_ADD_BLOCK", - 2: "RAFT_COMMAND_POLL_COMPACTION_JOBS", - 3: "RAFT_COMMAND_CLEAN_BLOCKS", + 1: "RAFT_COMMAND_ADD_BLOCK_METADATA", + 2: "RAFT_COMMAND_GET_COMPACTION_PLAN_UPDATE", + 3: "RAFT_COMMAND_UPDATE_COMPACTION_PLAN", } RaftCommand_value = map[string]int32{ - "RAFT_COMMAND_UNKNOWN": 0, - "RAFT_COMMAND_ADD_BLOCK": 1, - "RAFT_COMMAND_POLL_COMPACTION_JOBS": 2, - "RAFT_COMMAND_CLEAN_BLOCKS": 3, + "RAFT_COMMAND_UNKNOWN": 0, + "RAFT_COMMAND_ADD_BLOCK_METADATA": 1, + "RAFT_COMMAND_GET_COMPACTION_PLAN_UPDATE": 2, + "RAFT_COMMAND_UPDATE_COMPACTION_PLAN": 3, } ) @@ -72,16 +73,16 @@ func (RaftCommand) EnumDescriptor() ([]byte, []int) { return file_metastore_v1_raft_log_raft_log_proto_rawDescGZIP(), []int{0} } -type CleanBlocksRequest struct { +type AddBlockMetadataRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + Metadata *v1.BlockMeta `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` } -func (x *CleanBlocksRequest) Reset() { - *x = CleanBlocksRequest{} +func (x *AddBlockMetadataRequest) Reset() { + *x = AddBlockMetadataRequest{} if protoimpl.UnsafeEnabled { mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -89,13 +90,13 @@ func (x *CleanBlocksRequest) Reset() { } } -func (x *CleanBlocksRequest) String() string { +func (x *AddBlockMetadataRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CleanBlocksRequest) ProtoMessage() {} +func (*AddBlockMetadataRequest) ProtoMessage() {} -func (x *CleanBlocksRequest) ProtoReflect() protoreflect.Message { +func (x *AddBlockMetadataRequest) ProtoReflect() protoreflect.Message { mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -107,47 +108,966 @@ func (x *CleanBlocksRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CleanBlocksRequest.ProtoReflect.Descriptor instead. -func (*CleanBlocksRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use AddBlockMetadataRequest.ProtoReflect.Descriptor instead. 
+func (*AddBlockMetadataRequest) Descriptor() ([]byte, []int) { return file_metastore_v1_raft_log_raft_log_proto_rawDescGZIP(), []int{0} } -func (x *CleanBlocksRequest) GetRequestId() string { +func (x *AddBlockMetadataRequest) GetMetadata() *v1.BlockMeta { if x != nil { - return x.RequestId + return x.Metadata + } + return nil +} + +type AddBlockMetadataResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *AddBlockMetadataResponse) Reset() { + *x = AddBlockMetadataResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AddBlockMetadataResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AddBlockMetadataResponse) ProtoMessage() {} + +func (x *AddBlockMetadataResponse) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddBlockMetadataResponse.ProtoReflect.Descriptor instead. +func (*AddBlockMetadataResponse) Descriptor() ([]byte, []int) { + return file_metastore_v1_raft_log_raft_log_proto_rawDescGZIP(), []int{1} +} + +// GetCompactionPlanUpdateRequest requests CompactionPlanUpdate. +// The resulting plan should be proposed to the raft members. +// This is a read-only operation: it MUST NOT alter the state. +type GetCompactionPlanUpdateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // CompactionJobStatusUpdate is a change + // requested by the compaction worker. + StatusUpdates []*CompactionJobStatusUpdate `protobuf:"bytes,1,rep,name=status_updates,json=statusUpdates,proto3" json:"status_updates,omitempty"` + AssignJobsMax uint32 `protobuf:"varint,2,opt,name=assign_jobs_max,json=assignJobsMax,proto3" json:"assign_jobs_max,omitempty"` +} + +func (x *GetCompactionPlanUpdateRequest) Reset() { + *x = GetCompactionPlanUpdateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetCompactionPlanUpdateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetCompactionPlanUpdateRequest) ProtoMessage() {} + +func (x *GetCompactionPlanUpdateRequest) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetCompactionPlanUpdateRequest.ProtoReflect.Descriptor instead. 
+func (*GetCompactionPlanUpdateRequest) Descriptor() ([]byte, []int) { + return file_metastore_v1_raft_log_raft_log_proto_rawDescGZIP(), []int{2} +} + +func (x *GetCompactionPlanUpdateRequest) GetStatusUpdates() []*CompactionJobStatusUpdate { + if x != nil { + return x.StatusUpdates + } + return nil +} + +func (x *GetCompactionPlanUpdateRequest) GetAssignJobsMax() uint32 { + if x != nil { + return x.AssignJobsMax + } + return 0 +} + +type CompactionJobStatusUpdate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Token uint64 `protobuf:"varint,2,opt,name=token,proto3" json:"token,omitempty"` + Status v1.CompactionJobStatus `protobuf:"varint,3,opt,name=status,proto3,enum=metastore.v1.CompactionJobStatus" json:"status,omitempty"` +} + +func (x *CompactionJobStatusUpdate) Reset() { + *x = CompactionJobStatusUpdate{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CompactionJobStatusUpdate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CompactionJobStatusUpdate) ProtoMessage() {} + +func (x *CompactionJobStatusUpdate) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CompactionJobStatusUpdate.ProtoReflect.Descriptor instead. +func (*CompactionJobStatusUpdate) Descriptor() ([]byte, []int) { + return file_metastore_v1_raft_log_raft_log_proto_rawDescGZIP(), []int{3} +} + +func (x *CompactionJobStatusUpdate) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CompactionJobStatusUpdate) GetToken() uint64 { + if x != nil { + return x.Token + } + return 0 +} + +func (x *CompactionJobStatusUpdate) GetStatus() v1.CompactionJobStatus { + if x != nil { + return x.Status + } + return v1.CompactionJobStatus(0) +} + +// GetCompactionPlanUpdateResponse includes the planned change. +// The plan should be proposed to the raft members. 
+type GetCompactionPlanUpdateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Term uint64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"` + PlanUpdate *CompactionPlanUpdate `protobuf:"bytes,2,opt,name=plan_update,json=planUpdate,proto3" json:"plan_update,omitempty"` +} + +func (x *GetCompactionPlanUpdateResponse) Reset() { + *x = GetCompactionPlanUpdateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetCompactionPlanUpdateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetCompactionPlanUpdateResponse) ProtoMessage() {} + +func (x *GetCompactionPlanUpdateResponse) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetCompactionPlanUpdateResponse.ProtoReflect.Descriptor instead. +func (*GetCompactionPlanUpdateResponse) Descriptor() ([]byte, []int) { + return file_metastore_v1_raft_log_raft_log_proto_rawDescGZIP(), []int{4} +} + +func (x *GetCompactionPlanUpdateResponse) GetTerm() uint64 { + if x != nil { + return x.Term + } + return 0 +} + +func (x *GetCompactionPlanUpdateResponse) GetPlanUpdate() *CompactionPlanUpdate { + if x != nil { + return x.PlanUpdate + } + return nil +} + +type CompactionPlanUpdate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NewJobs []*NewCompactionJob `protobuf:"bytes,1,rep,name=new_jobs,json=newJobs,proto3" json:"new_jobs,omitempty"` + AssignedJobs []*AssignedCompactionJob `protobuf:"bytes,2,rep,name=assigned_jobs,json=assignedJobs,proto3" json:"assigned_jobs,omitempty"` + UpdatedJobs []*UpdatedCompactionJob `protobuf:"bytes,3,rep,name=updated_jobs,json=updatedJobs,proto3" json:"updated_jobs,omitempty"` + CompletedJobs []*CompletedCompactionJob `protobuf:"bytes,4,rep,name=completed_jobs,json=completedJobs,proto3" json:"completed_jobs,omitempty"` +} + +func (x *CompactionPlanUpdate) Reset() { + *x = CompactionPlanUpdate{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CompactionPlanUpdate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CompactionPlanUpdate) ProtoMessage() {} + +func (x *CompactionPlanUpdate) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CompactionPlanUpdate.ProtoReflect.Descriptor instead. 
+func (*CompactionPlanUpdate) Descriptor() ([]byte, []int) { + return file_metastore_v1_raft_log_raft_log_proto_rawDescGZIP(), []int{5} +} + +func (x *CompactionPlanUpdate) GetNewJobs() []*NewCompactionJob { + if x != nil { + return x.NewJobs + } + return nil +} + +func (x *CompactionPlanUpdate) GetAssignedJobs() []*AssignedCompactionJob { + if x != nil { + return x.AssignedJobs + } + return nil +} + +func (x *CompactionPlanUpdate) GetUpdatedJobs() []*UpdatedCompactionJob { + if x != nil { + return x.UpdatedJobs + } + return nil +} + +func (x *CompactionPlanUpdate) GetCompletedJobs() []*CompletedCompactionJob { + if x != nil { + return x.CompletedJobs + } + return nil +} + +type NewCompactionJob struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + State *CompactionJobState `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + Plan *CompactionJobPlan `protobuf:"bytes,2,opt,name=plan,proto3" json:"plan,omitempty"` +} + +func (x *NewCompactionJob) Reset() { + *x = NewCompactionJob{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NewCompactionJob) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NewCompactionJob) ProtoMessage() {} + +func (x *NewCompactionJob) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NewCompactionJob.ProtoReflect.Descriptor instead. +func (*NewCompactionJob) Descriptor() ([]byte, []int) { + return file_metastore_v1_raft_log_raft_log_proto_rawDescGZIP(), []int{6} +} + +func (x *NewCompactionJob) GetState() *CompactionJobState { + if x != nil { + return x.State + } + return nil +} + +func (x *NewCompactionJob) GetPlan() *CompactionJobPlan { + if x != nil { + return x.Plan + } + return nil +} + +type AssignedCompactionJob struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + State *CompactionJobState `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + Plan *CompactionJobPlan `protobuf:"bytes,2,opt,name=plan,proto3" json:"plan,omitempty"` +} + +func (x *AssignedCompactionJob) Reset() { + *x = AssignedCompactionJob{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AssignedCompactionJob) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AssignedCompactionJob) ProtoMessage() {} + +func (x *AssignedCompactionJob) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AssignedCompactionJob.ProtoReflect.Descriptor instead. 
+func (*AssignedCompactionJob) Descriptor() ([]byte, []int) { + return file_metastore_v1_raft_log_raft_log_proto_rawDescGZIP(), []int{7} +} + +func (x *AssignedCompactionJob) GetState() *CompactionJobState { + if x != nil { + return x.State + } + return nil +} + +func (x *AssignedCompactionJob) GetPlan() *CompactionJobPlan { + if x != nil { + return x.Plan + } + return nil +} + +type UpdatedCompactionJob struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + State *CompactionJobState `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` +} + +func (x *UpdatedCompactionJob) Reset() { + *x = UpdatedCompactionJob{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdatedCompactionJob) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdatedCompactionJob) ProtoMessage() {} + +func (x *UpdatedCompactionJob) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdatedCompactionJob.ProtoReflect.Descriptor instead. +func (*UpdatedCompactionJob) Descriptor() ([]byte, []int) { + return file_metastore_v1_raft_log_raft_log_proto_rawDescGZIP(), []int{8} +} + +func (x *UpdatedCompactionJob) GetState() *CompactionJobState { + if x != nil { + return x.State + } + return nil +} + +type CompletedCompactionJob struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + State *CompactionJobState `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + CompactedBlocks *v1.CompactedBlocks `protobuf:"bytes,2,opt,name=compacted_blocks,json=compactedBlocks,proto3" json:"compacted_blocks,omitempty"` +} + +func (x *CompletedCompactionJob) Reset() { + *x = CompletedCompactionJob{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CompletedCompactionJob) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CompletedCompactionJob) ProtoMessage() {} + +func (x *CompletedCompactionJob) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CompletedCompactionJob.ProtoReflect.Descriptor instead. +func (*CompletedCompactionJob) Descriptor() ([]byte, []int) { + return file_metastore_v1_raft_log_raft_log_proto_rawDescGZIP(), []int{9} +} + +func (x *CompletedCompactionJob) GetState() *CompactionJobState { + if x != nil { + return x.State + } + return nil +} + +func (x *CompletedCompactionJob) GetCompactedBlocks() *v1.CompactedBlocks { + if x != nil { + return x.CompactedBlocks + } + return nil +} + +// CompactionJobState is produced in response to +// the compaction worker status update request. 
+// +// Compaction level and other attributes that +// affect the scheduling order or status update +// handling should be included into the message. +type CompactionJobState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + CompactionLevel uint32 `protobuf:"varint,2,opt,name=compaction_level,json=compactionLevel,proto3" json:"compaction_level,omitempty"` + Status v1.CompactionJobStatus `protobuf:"varint,3,opt,name=status,proto3,enum=metastore.v1.CompactionJobStatus" json:"status,omitempty"` + Token uint64 `protobuf:"varint,4,opt,name=token,proto3" json:"token,omitempty"` + LeaseExpiresAt int64 `protobuf:"varint,5,opt,name=lease_expires_at,json=leaseExpiresAt,proto3" json:"lease_expires_at,omitempty"` + AddedAt int64 `protobuf:"varint,6,opt,name=added_at,json=addedAt,proto3" json:"added_at,omitempty"` + Failures uint32 `protobuf:"varint,7,opt,name=failures,proto3" json:"failures,omitempty"` +} + +func (x *CompactionJobState) Reset() { + *x = CompactionJobState{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CompactionJobState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CompactionJobState) ProtoMessage() {} + +func (x *CompactionJobState) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CompactionJobState.ProtoReflect.Descriptor instead. +func (*CompactionJobState) Descriptor() ([]byte, []int) { + return file_metastore_v1_raft_log_raft_log_proto_rawDescGZIP(), []int{10} +} + +func (x *CompactionJobState) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CompactionJobState) GetCompactionLevel() uint32 { + if x != nil { + return x.CompactionLevel + } + return 0 +} + +func (x *CompactionJobState) GetStatus() v1.CompactionJobStatus { + if x != nil { + return x.Status + } + return v1.CompactionJobStatus(0) +} + +func (x *CompactionJobState) GetToken() uint64 { + if x != nil { + return x.Token + } + return 0 +} + +func (x *CompactionJobState) GetLeaseExpiresAt() int64 { + if x != nil { + return x.LeaseExpiresAt + } + return 0 +} + +func (x *CompactionJobState) GetAddedAt() int64 { + if x != nil { + return x.AddedAt + } + return 0 +} + +func (x *CompactionJobState) GetFailures() uint32 { + if x != nil { + return x.Failures + } + return 0 +} + +type CompactionJobPlan struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Blocks to be compacted. + Tenant string `protobuf:"bytes,2,opt,name=tenant,proto3" json:"tenant,omitempty"` + Shard uint32 `protobuf:"varint,3,opt,name=shard,proto3" json:"shard,omitempty"` + CompactionLevel uint32 `protobuf:"varint,4,opt,name=compaction_level,json=compactionLevel,proto3" json:"compaction_level,omitempty"` + SourceBlocks []string `protobuf:"bytes,5,rep,name=source_blocks,json=sourceBlocks,proto3" json:"source_blocks,omitempty"` + // Objects to be deleted. 
+ Tombstones []*v1.Tombstones `protobuf:"bytes,6,rep,name=tombstones,proto3" json:"tombstones,omitempty"` +} + +func (x *CompactionJobPlan) Reset() { + *x = CompactionJobPlan{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CompactionJobPlan) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CompactionJobPlan) ProtoMessage() {} + +func (x *CompactionJobPlan) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CompactionJobPlan.ProtoReflect.Descriptor instead. +func (*CompactionJobPlan) Descriptor() ([]byte, []int) { + return file_metastore_v1_raft_log_raft_log_proto_rawDescGZIP(), []int{11} +} + +func (x *CompactionJobPlan) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CompactionJobPlan) GetTenant() string { + if x != nil { + return x.Tenant } return "" } +func (x *CompactionJobPlan) GetShard() uint32 { + if x != nil { + return x.Shard + } + return 0 +} + +func (x *CompactionJobPlan) GetCompactionLevel() uint32 { + if x != nil { + return x.CompactionLevel + } + return 0 +} + +func (x *CompactionJobPlan) GetSourceBlocks() []string { + if x != nil { + return x.SourceBlocks + } + return nil +} + +func (x *CompactionJobPlan) GetTombstones() []*v1.Tombstones { + if x != nil { + return x.Tombstones + } + return nil +} + +// UpdateCompactionPlanRequest proposes compaction plan changes. +type UpdateCompactionPlanRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Term uint64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"` + PlanUpdate *CompactionPlanUpdate `protobuf:"bytes,2,opt,name=plan_update,json=planUpdate,proto3" json:"plan_update,omitempty"` +} + +func (x *UpdateCompactionPlanRequest) Reset() { + *x = UpdateCompactionPlanRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateCompactionPlanRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateCompactionPlanRequest) ProtoMessage() {} + +func (x *UpdateCompactionPlanRequest) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateCompactionPlanRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateCompactionPlanRequest) Descriptor() ([]byte, []int) { + return file_metastore_v1_raft_log_raft_log_proto_rawDescGZIP(), []int{12} +} + +func (x *UpdateCompactionPlanRequest) GetTerm() uint64 { + if x != nil { + return x.Term + } + return 0 +} + +func (x *UpdateCompactionPlanRequest) GetPlanUpdate() *CompactionPlanUpdate { + if x != nil { + return x.PlanUpdate + } + return nil +} + +type UpdateCompactionPlanResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PlanUpdate *CompactionPlanUpdate `protobuf:"bytes,1,opt,name=plan_update,json=planUpdate,proto3" json:"plan_update,omitempty"` +} + +func (x *UpdateCompactionPlanResponse) Reset() { + *x = UpdateCompactionPlanResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateCompactionPlanResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateCompactionPlanResponse) ProtoMessage() {} + +func (x *UpdateCompactionPlanResponse) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_raft_log_raft_log_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateCompactionPlanResponse.ProtoReflect.Descriptor instead. +func (*UpdateCompactionPlanResponse) Descriptor() ([]byte, []int) { + return file_metastore_v1_raft_log_raft_log_proto_rawDescGZIP(), []int{13} +} + +func (x *UpdateCompactionPlanResponse) GetPlanUpdate() *CompactionPlanUpdate { + if x != nil { + return x.PlanUpdate + } + return nil +} + var File_metastore_v1_raft_log_raft_log_proto protoreflect.FileDescriptor var file_metastore_v1_raft_log_raft_log_proto_rawDesc = []byte{ 0x0a, 0x24, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x2f, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6c, 0x6f, 0x67, - 0x22, 0x33, 0x0a, 0x12, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x49, 0x64, 0x2a, 0x89, 0x01, 0x0a, 0x0b, 0x52, 0x61, 0x66, 0x74, 0x43, 0x6f, - 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x41, 0x46, 0x54, 0x5f, 0x43, 0x4f, - 0x4d, 0x4d, 0x41, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x1a, 0x0a, 0x16, 0x52, 0x41, 0x46, 0x54, 0x5f, 0x43, 0x4f, 0x4d, 0x4d, 0x41, 0x4e, 0x44, 0x5f, - 0x41, 0x44, 0x44, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x25, 0x0a, 0x21, 0x52, - 0x41, 0x46, 0x54, 0x5f, 0x43, 0x4f, 0x4d, 0x4d, 0x41, 0x4e, 0x44, 0x5f, 0x50, 0x4f, 0x4c, 0x4c, - 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4a, 0x4f, 0x42, 0x53, - 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x19, 0x52, 0x41, 0x46, 0x54, 0x5f, 0x43, 0x4f, 0x4d, 0x4d, 0x41, - 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x45, 0x41, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x53, 0x10, - 0x03, 0x42, 0x9d, 0x01, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6c, - 0x6f, 
0x67, 0x42, 0x0c, 0x52, 0x61, 0x66, 0x74, 0x4c, 0x6f, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x43, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, - 0x72, 0x61, 0x66, 0x61, 0x6e, 0x61, 0x2f, 0x70, 0x79, 0x72, 0x6f, 0x73, 0x63, 0x6f, 0x70, 0x65, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, - 0x6f, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, - 0x61, 0x66, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0xa2, 0x02, 0x03, 0x52, 0x58, 0x58, 0xaa, 0x02, 0x07, - 0x52, 0x61, 0x66, 0x74, 0x4c, 0x6f, 0x67, 0xca, 0x02, 0x07, 0x52, 0x61, 0x66, 0x74, 0x4c, 0x6f, - 0x67, 0xe2, 0x02, 0x13, 0x52, 0x61, 0x66, 0x74, 0x4c, 0x6f, 0x67, 0x5c, 0x47, 0x50, 0x42, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x07, 0x52, 0x61, 0x66, 0x74, 0x4c, 0x6f, - 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x1a, 0x1c, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x63, + 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, + 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4e, 0x0a, 0x17, 0x41, 0x64, 0x64, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x1a, 0x0a, 0x18, 0x41, 0x64, 0x64, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x94, 0x01, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6d, 0x70, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6c, 0x61, 0x6e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4a, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x5f, 0x6a, 0x6f, + 0x62, 0x73, 0x5f, 0x6d, 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x61, 0x73, + 0x73, 0x69, 0x67, 0x6e, 0x4a, 0x6f, 0x62, 0x73, 0x4d, 0x61, 0x78, 0x22, 0x80, 0x01, 0x0a, 0x19, + 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 
0x6e, 0x4a, 0x6f, 0x62, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x76, + 0x0a, 0x1f, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x6c, 0x61, 0x6e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x04, 0x74, 0x65, 0x72, 0x6d, 0x12, 0x3f, 0x0a, 0x0b, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x61, 0x66, + 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x6c, 0x61, 0x6e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x70, 0x6c, 0x61, 0x6e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, 0x9f, 0x02, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6c, 0x61, 0x6e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, + 0x35, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f, 0x6a, 0x6f, 0x62, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x2e, 0x4e, 0x65, 0x77, + 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x52, 0x07, 0x6e, + 0x65, 0x77, 0x4a, 0x6f, 0x62, 0x73, 0x12, 0x44, 0x0a, 0x0d, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, + 0x65, 0x64, 0x5f, 0x6a, 0x6f, 0x62, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, + 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x52, 0x0c, + 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4a, 0x6f, 0x62, 0x73, 0x12, 0x41, 0x0a, 0x0c, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6a, 0x6f, 0x62, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, + 0x6f, 0x62, 0x52, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x4a, 0x6f, 0x62, 0x73, 0x12, + 0x47, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6a, 0x6f, 0x62, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6c, + 0x6f, 0x67, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x52, 0x0d, 0x63, 0x6f, 0x6d, 0x70, 0x6c, + 0x65, 0x74, 0x65, 0x64, 0x4a, 0x6f, 0x62, 0x73, 0x22, 0x77, 0x0a, 0x10, 0x4e, 0x65, 0x77, 0x43, + 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x12, 0x32, 0x0a, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x72, 0x61, + 0x66, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x2f, 0x0a, 0x04, 0x70, 0x6c, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x04, 0x70, 0x6c, 0x61, + 0x6e, 0x22, 0x7c, 0x0a, 0x15, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x43, 0x6f, 0x6d, + 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x12, 0x32, 0x0a, 0x05, 0x73, 0x74, + 
0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x72, 0x61, 0x66, 0x74, + 0x5f, 0x6c, 0x6f, 0x67, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, + 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, + 0x0a, 0x04, 0x70, 0x6c, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x72, + 0x61, 0x66, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x04, 0x70, 0x6c, 0x61, 0x6e, 0x22, + 0x4a, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x12, 0x32, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6c, 0x6f, + 0x67, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x96, 0x01, 0x0a, 0x16, + 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x12, 0x32, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6c, 0x6f, 0x67, + 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x48, 0x0a, 0x10, 0x63, 0x6f, + 0x6d, 0x70, 0x61, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x73, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x65, 0x64, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x73, 0x22, 0x85, 0x02, 0x0a, 0x12, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, + 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x6d, 0x65, 0x74, + 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x6c, + 0x65, 0x61, 0x73, 0x65, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x45, 0x78, 0x70, 0x69, + 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x64, 0x64, 0x65, 0x64, 0x5f, 0x61, + 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x61, 0x64, 0x64, 0x65, 0x64, 0x41, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x08, 
0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x22, 0xdf, 0x01, 0x0a, + 0x11, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, 0x50, 0x6c, + 0x61, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, + 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x23, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x38, 0x0a, 0x0a, 0x74, 0x6f, 0x6d, 0x62, 0x73, 0x74, 0x6f, 0x6e, + 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x6d, 0x62, 0x73, 0x74, 0x6f, 0x6e, + 0x65, 0x73, 0x52, 0x0a, 0x74, 0x6f, 0x6d, 0x62, 0x73, 0x74, 0x6f, 0x6e, 0x65, 0x73, 0x22, 0x72, + 0x0a, 0x1b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x65, 0x72, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x65, 0x72, + 0x6d, 0x12, 0x3f, 0x0a, 0x0b, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6c, 0x6f, + 0x67, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6c, 0x61, 0x6e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x70, 0x6c, 0x61, 0x6e, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x22, 0x5f, 0x0a, 0x1c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6d, 0x70, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x0b, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6c, + 0x6f, 0x67, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6c, 0x61, + 0x6e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x70, 0x6c, 0x61, 0x6e, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x2a, 0xa2, 0x01, 0x0a, 0x0b, 0x52, 0x61, 0x66, 0x74, 0x43, 0x6f, 0x6d, 0x6d, + 0x61, 0x6e, 0x64, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x41, 0x46, 0x54, 0x5f, 0x43, 0x4f, 0x4d, 0x4d, + 0x41, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x23, 0x0a, + 0x1f, 0x52, 0x41, 0x46, 0x54, 0x5f, 0x43, 0x4f, 0x4d, 0x4d, 0x41, 0x4e, 0x44, 0x5f, 0x41, 0x44, + 0x44, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, + 0x10, 0x01, 0x12, 0x2b, 0x0a, 0x27, 0x52, 0x41, 0x46, 0x54, 0x5f, 0x43, 0x4f, 0x4d, 0x4d, 0x41, + 0x4e, 0x44, 0x5f, 0x47, 0x45, 0x54, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x43, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x50, 0x4c, 0x41, 0x4e, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x10, 0x02, 0x12, + 0x27, 0x0a, 0x23, 0x52, 0x41, 0x46, 0x54, 0x5f, 
0x43, 0x4f, 0x4d, 0x4d, 0x41, 0x4e, 0x44, 0x5f, + 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x43, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x50, 0x4c, 0x41, 0x4e, 0x10, 0x03, 0x42, 0x9d, 0x01, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, + 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x42, 0x0c, 0x52, 0x61, 0x66, 0x74, 0x4c, + 0x6f, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x43, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x61, 0x66, 0x61, 0x6e, 0x61, 0x2f, 0x70, 0x79, + 0x72, 0x6f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x67, 0x65, 0x6e, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0xa2, 0x02, + 0x03, 0x52, 0x58, 0x58, 0xaa, 0x02, 0x07, 0x52, 0x61, 0x66, 0x74, 0x4c, 0x6f, 0x67, 0xca, 0x02, + 0x07, 0x52, 0x61, 0x66, 0x74, 0x4c, 0x6f, 0x67, 0xe2, 0x02, 0x13, 0x52, 0x61, 0x66, 0x74, 0x4c, + 0x6f, 0x67, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, + 0x07, 0x52, 0x61, 0x66, 0x74, 0x4c, 0x6f, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -163,17 +1083,53 @@ func file_metastore_v1_raft_log_raft_log_proto_rawDescGZIP() []byte { } var file_metastore_v1_raft_log_raft_log_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_metastore_v1_raft_log_raft_log_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_metastore_v1_raft_log_raft_log_proto_msgTypes = make([]protoimpl.MessageInfo, 14) var file_metastore_v1_raft_log_raft_log_proto_goTypes = []any{ - (RaftCommand)(0), // 0: raft_log.RaftCommand - (*CleanBlocksRequest)(nil), // 1: raft_log.CleanBlocksRequest + (RaftCommand)(0), // 0: raft_log.RaftCommand + (*AddBlockMetadataRequest)(nil), // 1: raft_log.AddBlockMetadataRequest + (*AddBlockMetadataResponse)(nil), // 2: raft_log.AddBlockMetadataResponse + (*GetCompactionPlanUpdateRequest)(nil), // 3: raft_log.GetCompactionPlanUpdateRequest + (*CompactionJobStatusUpdate)(nil), // 4: raft_log.CompactionJobStatusUpdate + (*GetCompactionPlanUpdateResponse)(nil), // 5: raft_log.GetCompactionPlanUpdateResponse + (*CompactionPlanUpdate)(nil), // 6: raft_log.CompactionPlanUpdate + (*NewCompactionJob)(nil), // 7: raft_log.NewCompactionJob + (*AssignedCompactionJob)(nil), // 8: raft_log.AssignedCompactionJob + (*UpdatedCompactionJob)(nil), // 9: raft_log.UpdatedCompactionJob + (*CompletedCompactionJob)(nil), // 10: raft_log.CompletedCompactionJob + (*CompactionJobState)(nil), // 11: raft_log.CompactionJobState + (*CompactionJobPlan)(nil), // 12: raft_log.CompactionJobPlan + (*UpdateCompactionPlanRequest)(nil), // 13: raft_log.UpdateCompactionPlanRequest + (*UpdateCompactionPlanResponse)(nil), // 14: raft_log.UpdateCompactionPlanResponse + (*v1.BlockMeta)(nil), // 15: metastore.v1.BlockMeta + (v1.CompactionJobStatus)(0), // 16: metastore.v1.CompactionJobStatus + (*v1.CompactedBlocks)(nil), // 17: metastore.v1.CompactedBlocks + (*v1.Tombstones)(nil), // 18: metastore.v1.Tombstones } var file_metastore_v1_raft_log_raft_log_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 15, // 0: raft_log.AddBlockMetadataRequest.metadata:type_name -> metastore.v1.BlockMeta + 4, // 1: 
raft_log.GetCompactionPlanUpdateRequest.status_updates:type_name -> raft_log.CompactionJobStatusUpdate + 16, // 2: raft_log.CompactionJobStatusUpdate.status:type_name -> metastore.v1.CompactionJobStatus + 6, // 3: raft_log.GetCompactionPlanUpdateResponse.plan_update:type_name -> raft_log.CompactionPlanUpdate + 7, // 4: raft_log.CompactionPlanUpdate.new_jobs:type_name -> raft_log.NewCompactionJob + 8, // 5: raft_log.CompactionPlanUpdate.assigned_jobs:type_name -> raft_log.AssignedCompactionJob + 9, // 6: raft_log.CompactionPlanUpdate.updated_jobs:type_name -> raft_log.UpdatedCompactionJob + 10, // 7: raft_log.CompactionPlanUpdate.completed_jobs:type_name -> raft_log.CompletedCompactionJob + 11, // 8: raft_log.NewCompactionJob.state:type_name -> raft_log.CompactionJobState + 12, // 9: raft_log.NewCompactionJob.plan:type_name -> raft_log.CompactionJobPlan + 11, // 10: raft_log.AssignedCompactionJob.state:type_name -> raft_log.CompactionJobState + 12, // 11: raft_log.AssignedCompactionJob.plan:type_name -> raft_log.CompactionJobPlan + 11, // 12: raft_log.UpdatedCompactionJob.state:type_name -> raft_log.CompactionJobState + 11, // 13: raft_log.CompletedCompactionJob.state:type_name -> raft_log.CompactionJobState + 17, // 14: raft_log.CompletedCompactionJob.compacted_blocks:type_name -> metastore.v1.CompactedBlocks + 16, // 15: raft_log.CompactionJobState.status:type_name -> metastore.v1.CompactionJobStatus + 18, // 16: raft_log.CompactionJobPlan.tombstones:type_name -> metastore.v1.Tombstones + 6, // 17: raft_log.UpdateCompactionPlanRequest.plan_update:type_name -> raft_log.CompactionPlanUpdate + 6, // 18: raft_log.UpdateCompactionPlanResponse.plan_update:type_name -> raft_log.CompactionPlanUpdate + 19, // [19:19] is the sub-list for method output_type + 19, // [19:19] is the sub-list for method input_type + 19, // [19:19] is the sub-list for extension type_name + 19, // [19:19] is the sub-list for extension extendee + 0, // [0:19] is the sub-list for field type_name } func init() { file_metastore_v1_raft_log_raft_log_proto_init() } @@ -183,7 +1139,163 @@ func file_metastore_v1_raft_log_raft_log_proto_init() { } if !protoimpl.UnsafeEnabled { file_metastore_v1_raft_log_raft_log_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*CleanBlocksRequest); i { + switch v := v.(*AddBlockMetadataRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metastore_v1_raft_log_raft_log_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*AddBlockMetadataResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metastore_v1_raft_log_raft_log_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*GetCompactionPlanUpdateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metastore_v1_raft_log_raft_log_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*CompactionJobStatusUpdate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metastore_v1_raft_log_raft_log_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*GetCompactionPlanUpdateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_metastore_v1_raft_log_raft_log_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*CompactionPlanUpdate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metastore_v1_raft_log_raft_log_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*NewCompactionJob); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metastore_v1_raft_log_raft_log_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*AssignedCompactionJob); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metastore_v1_raft_log_raft_log_proto_msgTypes[8].Exporter = func(v any, i int) any { + switch v := v.(*UpdatedCompactionJob); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metastore_v1_raft_log_raft_log_proto_msgTypes[9].Exporter = func(v any, i int) any { + switch v := v.(*CompletedCompactionJob); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metastore_v1_raft_log_raft_log_proto_msgTypes[10].Exporter = func(v any, i int) any { + switch v := v.(*CompactionJobState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metastore_v1_raft_log_raft_log_proto_msgTypes[11].Exporter = func(v any, i int) any { + switch v := v.(*CompactionJobPlan); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metastore_v1_raft_log_raft_log_proto_msgTypes[12].Exporter = func(v any, i int) any { + switch v := v.(*UpdateCompactionPlanRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metastore_v1_raft_log_raft_log_proto_msgTypes[13].Exporter = func(v any, i int) any { + switch v := v.(*UpdateCompactionPlanResponse); i { case 0: return &v.state case 1: @@ -201,7 +1313,7 @@ func file_metastore_v1_raft_log_raft_log_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_metastore_v1_raft_log_raft_log_proto_rawDesc, NumEnums: 1, - NumMessages: 1, + NumMessages: 14, NumExtensions: 0, NumServices: 0, }, diff --git a/api/gen/proto/go/metastore/v1/raft_log/raft_log_vtproto.pb.go b/api/gen/proto/go/metastore/v1/raft_log/raft_log_vtproto.pb.go index 936eea6456..75da6e84ba 100644 --- a/api/gen/proto/go/metastore/v1/raft_log/raft_log_vtproto.pb.go +++ b/api/gen/proto/go/metastore/v1/raft_log/raft_log_vtproto.pb.go @@ -6,6 +6,7 @@ package raft_log import ( fmt "fmt" + v1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" protohelpers "github.com/planetscale/vtprotobuf/protohelpers" proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -19,12 +20,310 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -func (m *CleanBlocksRequest) CloneVT() *CleanBlocksRequest { +func (m *AddBlockMetadataRequest) CloneVT() *AddBlockMetadataRequest { if m == nil { - return (*CleanBlocksRequest)(nil) + return (*AddBlockMetadataRequest)(nil) } - r := new(CleanBlocksRequest) - r.RequestId = 
m.RequestId + r := new(AddBlockMetadataRequest) + if rhs := m.Metadata; rhs != nil { + if vtpb, ok := interface{}(rhs).(interface{ CloneVT() *v1.BlockMeta }); ok { + r.Metadata = vtpb.CloneVT() + } else { + r.Metadata = proto.Clone(rhs).(*v1.BlockMeta) + } + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *AddBlockMetadataRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *AddBlockMetadataResponse) CloneVT() *AddBlockMetadataResponse { + if m == nil { + return (*AddBlockMetadataResponse)(nil) + } + r := new(AddBlockMetadataResponse) + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *AddBlockMetadataResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetCompactionPlanUpdateRequest) CloneVT() *GetCompactionPlanUpdateRequest { + if m == nil { + return (*GetCompactionPlanUpdateRequest)(nil) + } + r := new(GetCompactionPlanUpdateRequest) + r.AssignJobsMax = m.AssignJobsMax + if rhs := m.StatusUpdates; rhs != nil { + tmpContainer := make([]*CompactionJobStatusUpdate, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.StatusUpdates = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetCompactionPlanUpdateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CompactionJobStatusUpdate) CloneVT() *CompactionJobStatusUpdate { + if m == nil { + return (*CompactionJobStatusUpdate)(nil) + } + r := new(CompactionJobStatusUpdate) + r.Name = m.Name + r.Token = m.Token + r.Status = m.Status + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CompactionJobStatusUpdate) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetCompactionPlanUpdateResponse) CloneVT() *GetCompactionPlanUpdateResponse { + if m == nil { + return (*GetCompactionPlanUpdateResponse)(nil) + } + r := new(GetCompactionPlanUpdateResponse) + r.Term = m.Term + r.PlanUpdate = m.PlanUpdate.CloneVT() + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetCompactionPlanUpdateResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CompactionPlanUpdate) CloneVT() *CompactionPlanUpdate { + if m == nil { + return (*CompactionPlanUpdate)(nil) + } + r := new(CompactionPlanUpdate) + if rhs := m.NewJobs; rhs != nil { + tmpContainer := make([]*NewCompactionJob, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.NewJobs = tmpContainer + } + if rhs := m.AssignedJobs; rhs != nil { + tmpContainer := make([]*AssignedCompactionJob, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.AssignedJobs = tmpContainer + } + if rhs := m.UpdatedJobs; rhs != nil { + tmpContainer := make([]*UpdatedCompactionJob, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.UpdatedJobs = tmpContainer + } + if rhs := m.CompletedJobs; rhs != nil { + tmpContainer := make([]*CompletedCompactionJob, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.CompletedJobs = tmpContainer + } + if len(m.unknownFields) 
> 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CompactionPlanUpdate) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *NewCompactionJob) CloneVT() *NewCompactionJob { + if m == nil { + return (*NewCompactionJob)(nil) + } + r := new(NewCompactionJob) + r.State = m.State.CloneVT() + r.Plan = m.Plan.CloneVT() + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *NewCompactionJob) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *AssignedCompactionJob) CloneVT() *AssignedCompactionJob { + if m == nil { + return (*AssignedCompactionJob)(nil) + } + r := new(AssignedCompactionJob) + r.State = m.State.CloneVT() + r.Plan = m.Plan.CloneVT() + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *AssignedCompactionJob) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *UpdatedCompactionJob) CloneVT() *UpdatedCompactionJob { + if m == nil { + return (*UpdatedCompactionJob)(nil) + } + r := new(UpdatedCompactionJob) + r.State = m.State.CloneVT() + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *UpdatedCompactionJob) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CompletedCompactionJob) CloneVT() *CompletedCompactionJob { + if m == nil { + return (*CompletedCompactionJob)(nil) + } + r := new(CompletedCompactionJob) + r.State = m.State.CloneVT() + if rhs := m.CompactedBlocks; rhs != nil { + if vtpb, ok := interface{}(rhs).(interface{ CloneVT() *v1.CompactedBlocks }); ok { + r.CompactedBlocks = vtpb.CloneVT() + } else { + r.CompactedBlocks = proto.Clone(rhs).(*v1.CompactedBlocks) + } + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CompletedCompactionJob) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CompactionJobState) CloneVT() *CompactionJobState { + if m == nil { + return (*CompactionJobState)(nil) + } + r := new(CompactionJobState) + r.Name = m.Name + r.CompactionLevel = m.CompactionLevel + r.Status = m.Status + r.Token = m.Token + r.LeaseExpiresAt = m.LeaseExpiresAt + r.AddedAt = m.AddedAt + r.Failures = m.Failures + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CompactionJobState) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CompactionJobPlan) CloneVT() *CompactionJobPlan { + if m == nil { + return (*CompactionJobPlan)(nil) + } + r := new(CompactionJobPlan) + r.Name = m.Name + r.Tenant = m.Tenant + r.Shard = m.Shard + r.CompactionLevel = m.CompactionLevel + if rhs := m.SourceBlocks; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.SourceBlocks = tmpContainer + } + if rhs := m.Tombstones; rhs != nil { + tmpContainer := make([]*v1.Tombstones, len(rhs)) + for k, v := range rhs { + if vtpb, ok := interface{}(v).(interface{ CloneVT() *v1.Tombstones }); ok { + tmpContainer[k] = vtpb.CloneVT() + } else { + tmpContainer[k] = proto.Clone(v).(*v1.Tombstones) + } + } + r.Tombstones = tmpContainer + } + if len(m.unknownFields) > 0 { + 
r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CompactionJobPlan) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *UpdateCompactionPlanRequest) CloneVT() *UpdateCompactionPlanRequest { + if m == nil { + return (*UpdateCompactionPlanRequest)(nil) + } + r := new(UpdateCompactionPlanRequest) + r.Term = m.Term + r.PlanUpdate = m.PlanUpdate.CloneVT() + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *UpdateCompactionPlanRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *UpdateCompactionPlanResponse) CloneVT() *UpdateCompactionPlanResponse { + if m == nil { + return (*UpdateCompactionPlanResponse)(nil) + } + r := new(UpdateCompactionPlanResponse) + r.PlanUpdate = m.PlanUpdate.CloneVT() if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -32,84 +331,3183 @@ func (m *CleanBlocksRequest) CloneVT() *CleanBlocksRequest { return r } -func (m *CleanBlocksRequest) CloneMessageVT() proto.Message { +func (m *UpdateCompactionPlanResponse) CloneMessageVT() proto.Message { return m.CloneVT() } -func (this *CleanBlocksRequest) EqualVT(that *CleanBlocksRequest) bool { +func (this *AddBlockMetadataRequest) EqualVT(that *AddBlockMetadataRequest) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if equal, ok := interface{}(this.Metadata).(interface{ EqualVT(*v1.BlockMeta) bool }); ok { + if !equal.EqualVT(that.Metadata) { + return false + } + } else if !proto.Equal(this.Metadata, that.Metadata) { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *AddBlockMetadataRequest) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*AddBlockMetadataRequest) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *AddBlockMetadataResponse) EqualVT(that *AddBlockMetadataResponse) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *AddBlockMetadataResponse) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*AddBlockMetadataResponse) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *GetCompactionPlanUpdateRequest) EqualVT(that *GetCompactionPlanUpdateRequest) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if len(this.StatusUpdates) != len(that.StatusUpdates) { + return false + } + for i, vx := range this.StatusUpdates { + vy := that.StatusUpdates[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &CompactionJobStatusUpdate{} + } + if q == nil { + q = &CompactionJobStatusUpdate{} + } + if !p.EqualVT(q) { + return false + } + } + } + if this.AssignJobsMax != that.AssignJobsMax { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *GetCompactionPlanUpdateRequest) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*GetCompactionPlanUpdateRequest) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *CompactionJobStatusUpdate) EqualVT(that *CompactionJobStatusUpdate) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Name 
!= that.Name { + return false + } + if this.Token != that.Token { + return false + } + if this.Status != that.Status { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *CompactionJobStatusUpdate) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*CompactionJobStatusUpdate) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *GetCompactionPlanUpdateResponse) EqualVT(that *GetCompactionPlanUpdateResponse) bool { if this == that { return true } else if this == nil || that == nil { return false } - if this.RequestId != that.RequestId { + if this.Term != that.Term { + return false + } + if !this.PlanUpdate.EqualVT(that.PlanUpdate) { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *GetCompactionPlanUpdateResponse) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*GetCompactionPlanUpdateResponse) + if !ok { return false } - return string(this.unknownFields) == string(that.unknownFields) -} + return this.EqualVT(that) +} +func (this *CompactionPlanUpdate) EqualVT(that *CompactionPlanUpdate) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if len(this.NewJobs) != len(that.NewJobs) { + return false + } + for i, vx := range this.NewJobs { + vy := that.NewJobs[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &NewCompactionJob{} + } + if q == nil { + q = &NewCompactionJob{} + } + if !p.EqualVT(q) { + return false + } + } + } + if len(this.AssignedJobs) != len(that.AssignedJobs) { + return false + } + for i, vx := range this.AssignedJobs { + vy := that.AssignedJobs[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &AssignedCompactionJob{} + } + if q == nil { + q = &AssignedCompactionJob{} + } + if !p.EqualVT(q) { + return false + } + } + } + if len(this.UpdatedJobs) != len(that.UpdatedJobs) { + return false + } + for i, vx := range this.UpdatedJobs { + vy := that.UpdatedJobs[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &UpdatedCompactionJob{} + } + if q == nil { + q = &UpdatedCompactionJob{} + } + if !p.EqualVT(q) { + return false + } + } + } + if len(this.CompletedJobs) != len(that.CompletedJobs) { + return false + } + for i, vx := range this.CompletedJobs { + vy := that.CompletedJobs[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &CompletedCompactionJob{} + } + if q == nil { + q = &CompletedCompactionJob{} + } + if !p.EqualVT(q) { + return false + } + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *CompactionPlanUpdate) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*CompactionPlanUpdate) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *NewCompactionJob) EqualVT(that *NewCompactionJob) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if !this.State.EqualVT(that.State) { + return false + } + if !this.Plan.EqualVT(that.Plan) { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *NewCompactionJob) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*NewCompactionJob) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *AssignedCompactionJob) EqualVT(that *AssignedCompactionJob) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if !this.State.EqualVT(that.State) { + return false + } + 
if !this.Plan.EqualVT(that.Plan) { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *AssignedCompactionJob) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*AssignedCompactionJob) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *UpdatedCompactionJob) EqualVT(that *UpdatedCompactionJob) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if !this.State.EqualVT(that.State) { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *UpdatedCompactionJob) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*UpdatedCompactionJob) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *CompletedCompactionJob) EqualVT(that *CompletedCompactionJob) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if !this.State.EqualVT(that.State) { + return false + } + if equal, ok := interface{}(this.CompactedBlocks).(interface { + EqualVT(*v1.CompactedBlocks) bool + }); ok { + if !equal.EqualVT(that.CompactedBlocks) { + return false + } + } else if !proto.Equal(this.CompactedBlocks, that.CompactedBlocks) { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *CompletedCompactionJob) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*CompletedCompactionJob) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *CompactionJobState) EqualVT(that *CompactionJobState) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Name != that.Name { + return false + } + if this.CompactionLevel != that.CompactionLevel { + return false + } + if this.Status != that.Status { + return false + } + if this.Token != that.Token { + return false + } + if this.LeaseExpiresAt != that.LeaseExpiresAt { + return false + } + if this.AddedAt != that.AddedAt { + return false + } + if this.Failures != that.Failures { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *CompactionJobState) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*CompactionJobState) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *CompactionJobPlan) EqualVT(that *CompactionJobPlan) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Name != that.Name { + return false + } + if this.Tenant != that.Tenant { + return false + } + if this.Shard != that.Shard { + return false + } + if this.CompactionLevel != that.CompactionLevel { + return false + } + if len(this.SourceBlocks) != len(that.SourceBlocks) { + return false + } + for i, vx := range this.SourceBlocks { + vy := that.SourceBlocks[i] + if vx != vy { + return false + } + } + if len(this.Tombstones) != len(that.Tombstones) { + return false + } + for i, vx := range this.Tombstones { + vy := that.Tombstones[i] + if p, q := vx, vy; p != q { + if p == nil { + p = &v1.Tombstones{} + } + if q == nil { + q = &v1.Tombstones{} + } + if equal, ok := interface{}(p).(interface{ EqualVT(*v1.Tombstones) bool }); ok { + if !equal.EqualVT(q) { + return false + } + } else if !proto.Equal(p, q) { + return false + } + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *CompactionJobPlan) EqualMessageVT(thatMsg proto.Message) bool { + 
that, ok := thatMsg.(*CompactionJobPlan) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *UpdateCompactionPlanRequest) EqualVT(that *UpdateCompactionPlanRequest) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Term != that.Term { + return false + } + if !this.PlanUpdate.EqualVT(that.PlanUpdate) { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *UpdateCompactionPlanRequest) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*UpdateCompactionPlanRequest) + if !ok { + return false + } + return this.EqualVT(that) +} +func (this *UpdateCompactionPlanResponse) EqualVT(that *UpdateCompactionPlanResponse) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if !this.PlanUpdate.EqualVT(that.PlanUpdate) { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *UpdateCompactionPlanResponse) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*UpdateCompactionPlanResponse) + if !ok { + return false + } + return this.EqualVT(that) +} +func (m *AddBlockMetadataRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AddBlockMetadataRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AddBlockMetadataRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Metadata != nil { + if vtmsg, ok := interface{}(m.Metadata).(interface { + MarshalToSizedBufferVT([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Metadata) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AddBlockMetadataResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AddBlockMetadataResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AddBlockMetadataResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GetCompactionPlanUpdateRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetCompactionPlanUpdateRequest) MarshalToVT(dAtA []byte) (int, error) { + 
size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetCompactionPlanUpdateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AssignJobsMax != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.AssignJobsMax)) + i-- + dAtA[i] = 0x10 + } + if len(m.StatusUpdates) > 0 { + for iNdEx := len(m.StatusUpdates) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.StatusUpdates[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CompactionJobStatusUpdate) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompactionJobStatusUpdate) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CompactionJobStatusUpdate) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Status != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x18 + } + if m.Token != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Token)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetCompactionPlanUpdateResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetCompactionPlanUpdateResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetCompactionPlanUpdateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PlanUpdate != nil { + size, err := m.PlanUpdate.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Term != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Term)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CompactionPlanUpdate) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompactionPlanUpdate) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CompactionPlanUpdate) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if 
m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.CompletedJobs) > 0 { + for iNdEx := len(m.CompletedJobs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.CompletedJobs[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.UpdatedJobs) > 0 { + for iNdEx := len(m.UpdatedJobs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.UpdatedJobs[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.AssignedJobs) > 0 { + for iNdEx := len(m.AssignedJobs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.AssignedJobs[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.NewJobs) > 0 { + for iNdEx := len(m.NewJobs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.NewJobs[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NewCompactionJob) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NewCompactionJob) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *NewCompactionJob) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Plan != nil { + size, err := m.Plan.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.State != nil { + size, err := m.State.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AssignedCompactionJob) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AssignedCompactionJob) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AssignedCompactionJob) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Plan != nil { + size, err := m.Plan.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.State != nil { + size, err := m.State.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} 
+ +func (m *UpdatedCompactionJob) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdatedCompactionJob) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UpdatedCompactionJob) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.State != nil { + size, err := m.State.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CompletedCompactionJob) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompletedCompactionJob) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CompletedCompactionJob) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.CompactedBlocks != nil { + if vtmsg, ok := interface{}(m.CompactedBlocks).(interface { + MarshalToSizedBufferVT([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.CompactedBlocks) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x12 + } + if m.State != nil { + size, err := m.State.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CompactionJobState) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompactionJobState) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CompactionJobState) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Failures != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Failures)) + i-- + dAtA[i] = 0x38 + } + if m.AddedAt != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.AddedAt)) + i-- + dAtA[i] = 0x30 + } + if m.LeaseExpiresAt != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.LeaseExpiresAt)) + i-- + dAtA[i] = 0x28 + } + if m.Token != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Token)) + i-- + dAtA[i] = 0x20 + } + if m.Status != 0 { + i = 
protohelpers.EncodeVarint(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x18 + } + if m.CompactionLevel != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CompactionLevel)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CompactionJobPlan) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompactionJobPlan) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CompactionJobPlan) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Tombstones) > 0 { + for iNdEx := len(m.Tombstones) - 1; iNdEx >= 0; iNdEx-- { + if vtmsg, ok := interface{}(m.Tombstones[iNdEx]).(interface { + MarshalToSizedBufferVT([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + } else { + encoded, err := proto.Marshal(m.Tombstones[iNdEx]) + if err != nil { + return 0, err + } + i -= len(encoded) + copy(dAtA[i:], encoded) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded))) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.SourceBlocks) > 0 { + for iNdEx := len(m.SourceBlocks) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SourceBlocks[iNdEx]) + copy(dAtA[i:], m.SourceBlocks[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SourceBlocks[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.CompactionLevel != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CompactionLevel)) + i-- + dAtA[i] = 0x20 + } + if m.Shard != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Shard)) + i-- + dAtA[i] = 0x18 + } + if len(m.Tenant) > 0 { + i -= len(m.Tenant) + copy(dAtA[i:], m.Tenant) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Tenant))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateCompactionPlanRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateCompactionPlanRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UpdateCompactionPlanRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PlanUpdate != nil { + size, err := m.PlanUpdate.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Term != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Term)) + i-- + dAtA[i] = 
0x8 + } + return len(dAtA) - i, nil +} + +func (m *UpdateCompactionPlanResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateCompactionPlanResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UpdateCompactionPlanResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PlanUpdate != nil { + size, err := m.PlanUpdate.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AddBlockMetadataRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Metadata != nil { + if size, ok := interface{}(m.Metadata).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.Metadata) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *AddBlockMetadataResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetCompactionPlanUpdateRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.StatusUpdates) > 0 { + for _, e := range m.StatusUpdates { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.AssignJobsMax != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.AssignJobsMax)) + } + n += len(m.unknownFields) + return n +} + +func (m *CompactionJobStatusUpdate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Token != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Token)) + } + if m.Status != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Status)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetCompactionPlanUpdateResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Term != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Term)) + } + if m.PlanUpdate != nil { + l = m.PlanUpdate.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CompactionPlanUpdate) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.NewJobs) > 0 { + for _, e := range m.NewJobs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.AssignedJobs) > 0 { + for _, e := range m.AssignedJobs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.UpdatedJobs) > 0 { + for _, e := range m.UpdatedJobs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.CompletedJobs) > 0 { + for _, e := range m.CompletedJobs { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *NewCompactionJob) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.State != nil { + l = m.State.SizeVT() + n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) + } + if m.Plan != nil { + l = m.Plan.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *AssignedCompactionJob) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.State != nil { + l = m.State.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Plan != nil { + l = m.Plan.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpdatedCompactionJob) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.State != nil { + l = m.State.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CompletedCompactionJob) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.State != nil { + l = m.State.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CompactedBlocks != nil { + if size, ok := interface{}(m.CompactedBlocks).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(m.CompactedBlocks) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CompactionJobState) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.CompactionLevel != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.CompactionLevel)) + } + if m.Status != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Status)) + } + if m.Token != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Token)) + } + if m.LeaseExpiresAt != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.LeaseExpiresAt)) + } + if m.AddedAt != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.AddedAt)) + } + if m.Failures != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Failures)) + } + n += len(m.unknownFields) + return n +} + +func (m *CompactionJobPlan) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Tenant) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Shard != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Shard)) + } + if m.CompactionLevel != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.CompactionLevel)) + } + if len(m.SourceBlocks) > 0 { + for _, s := range m.SourceBlocks { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Tombstones) > 0 { + for _, e := range m.Tombstones { + if size, ok := interface{}(e).(interface { + SizeVT() int + }); ok { + l = size.SizeVT() + } else { + l = proto.Size(e) + } + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *UpdateCompactionPlanRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Term != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Term)) + } + if m.PlanUpdate != nil { + l = m.PlanUpdate.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpdateCompactionPlanResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PlanUpdate != nil { + l = m.PlanUpdate.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *AddBlockMetadataRequest) 
UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddBlockMetadataRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddBlockMetadataRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &v1.BlockMeta{} + } + if unmarshal, ok := interface{}(m.Metadata).(interface { + UnmarshalVT([]byte) error + }); ok { + if err := unmarshal.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + if err := proto.Unmarshal(dAtA[iNdEx:postIndex], m.Metadata); err != nil { + return err + } + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AddBlockMetadataResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddBlockMetadataResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddBlockMetadataResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCompactionPlanUpdateRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCompactionPlanUpdateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCompactionPlanUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StatusUpdates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StatusUpdates = append(m.StatusUpdates, &CompactionJobStatusUpdate{}) + if err := m.StatusUpdates[len(m.StatusUpdates)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AssignJobsMax", wireType) + } + m.AssignJobsMax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AssignJobsMax |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompactionJobStatusUpdate) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactionJobStatusUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactionJobStatusUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + m.Token = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Token |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= v1.CompactionJobStatus(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCompactionPlanUpdateResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCompactionPlanUpdateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCompactionPlanUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PlanUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PlanUpdate == nil { + m.PlanUpdate = &CompactionPlanUpdate{} + } + if err := m.PlanUpdate.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompactionPlanUpdate) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactionPlanUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactionPlanUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewJobs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NewJobs = append(m.NewJobs, &NewCompactionJob{}) + if err := m.NewJobs[len(m.NewJobs)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AssignedJobs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AssignedJobs = append(m.AssignedJobs, &AssignedCompactionJob{}) + if err := m.AssignedJobs[len(m.AssignedJobs)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedJobs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UpdatedJobs = append(m.UpdatedJobs, &UpdatedCompactionJob{}) + if err := m.UpdatedJobs[len(m.UpdatedJobs)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletedJobs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CompletedJobs = append(m.CompletedJobs, &CompletedCompactionJob{}) + if err := m.CompletedJobs[len(m.CompletedJobs)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NewCompactionJob) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NewCompactionJob: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NewCompactionJob: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.State == nil { + m.State = &CompactionJobState{} + } + if err := m.State.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Plan == nil { + m.Plan = &CompactionJobPlan{} + } + if err := m.Plan.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AssignedCompactionJob) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AssignedCompactionJob: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AssignedCompactionJob: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.State == nil { + m.State = &CompactionJobState{} + } + if err := m.State.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Plan == nil { + m.Plan = &CompactionJobPlan{} + } + if err := m.Plan.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdatedCompactionJob) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdatedCompactionJob: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdatedCompactionJob: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.State == nil { + m.State = &CompactionJobState{} + } + if err := m.State.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompletedCompactionJob) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompletedCompactionJob: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompletedCompactionJob: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.State == nil { + m.State = &CompactionJobState{} + } + if err := m.State.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactedBlocks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompactedBlocks == nil { + m.CompactedBlocks = &v1.CompactedBlocks{} + } + if unmarshal, ok := interface{}(m.CompactedBlocks).(interface { + UnmarshalVT([]byte) error + }); ok { + if err := unmarshal.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + if err := proto.Unmarshal(dAtA[iNdEx:postIndex], m.CompactedBlocks); err != nil { + return err + } + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompactionJobState) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactionJobState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactionJobState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactionLevel", wireType) + } + m.CompactionLevel = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompactionLevel |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= v1.CompactionJobStatus(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + m.Token = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Token |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseExpiresAt", wireType) + } + m.LeaseExpiresAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LeaseExpiresAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AddedAt", wireType) + } + m.AddedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AddedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failures", wireType) + } 
+ m.Failures = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Failures |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompactionJobPlan) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompactionJobPlan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompactionJobPlan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tenant", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tenant = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + m.Shard = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Shard |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompactionLevel", wireType) + } + m.CompactionLevel = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompactionLevel |= uint32(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceBlocks", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceBlocks = append(m.SourceBlocks, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tombstones", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tombstones = append(m.Tombstones, &v1.Tombstones{}) + if unmarshal, ok := interface{}(m.Tombstones[len(m.Tombstones)-1]).(interface { + UnmarshalVT([]byte) error + }); ok { + if err := unmarshal.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + if err := proto.Unmarshal(dAtA[iNdEx:postIndex], m.Tombstones[len(m.Tombstones)-1]); err != nil { + return err + } + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } -func (this *CleanBlocksRequest) EqualMessageVT(thatMsg proto.Message) bool { - that, ok := thatMsg.(*CleanBlocksRequest) - if !ok { - return false - } - return this.EqualVT(that) -} -func (m *CleanBlocksRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + if iNdEx > l { + return io.ErrUnexpectedEOF } - return dAtA[:n], nil -} - -func (m *CleanBlocksRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) + return nil } - -func (m *CleanBlocksRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.RequestId) > 0 { - i -= len(m.RequestId) - copy(dAtA[i:], m.RequestId) - i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RequestId))) - i-- - dAtA[i] = 0xa +func (m *UpdateCompactionPlanRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateCompactionPlanRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateCompactionPlanRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PlanUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PlanUpdate == nil { + m.PlanUpdate = &CompactionPlanUpdate{} + } + if err := m.PlanUpdate.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } } - return len(dAtA) - i, nil -} -func (m *CleanBlocksRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.RequestId) - if l > 0 { - n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + if iNdEx > l { + return io.ErrUnexpectedEOF } - n += len(m.unknownFields) - return n + return nil } - -func (m *CleanBlocksRequest) UnmarshalVT(dAtA []byte) error { +func (m *UpdateCompactionPlanResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -132,17 +3530,17 @@ func (m *CleanBlocksRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CleanBlocksRequest: wiretype end group for non-group") + return fmt.Errorf("proto: UpdateCompactionPlanResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CleanBlocksRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UpdateCompactionPlanResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PlanUpdate", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return protohelpers.ErrIntOverflow @@ -152,23 +3550,27 @@ func (m *CleanBlocksRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return protohelpers.ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return protohelpers.ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.RequestId = string(dAtA[iNdEx:postIndex]) + if m.PlanUpdate == nil { + m.PlanUpdate = &CompactionPlanUpdate{} + } + if err := m.PlanUpdate.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex diff --git a/api/gen/proto/go/metastore/v1/types.pb.go b/api/gen/proto/go/metastore/v1/types.pb.go index 94d64b399c..aef37a29c1 100644 --- a/api/gen/proto/go/metastore/v1/types.pb.go +++ b/api/gen/proto/go/metastore/v1/types.pb.go @@ -21,6 +21,69 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type BlockList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tenant string `protobuf:"bytes,1,opt,name=tenant,proto3" json:"tenant,omitempty"` + Shard uint32 `protobuf:"varint,2,opt,name=shard,proto3" json:"shard,omitempty"` + Blocks []string `protobuf:"bytes,3,rep,name=blocks,proto3" json:"blocks,omitempty"` +} + +func (x *BlockList) Reset() { + *x = BlockList{} + if protoimpl.UnsafeEnabled { + mi := &file_metastore_v1_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlockList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlockList) ProtoMessage() {} + +func (x *BlockList) ProtoReflect() protoreflect.Message { + mi := &file_metastore_v1_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use BlockList.ProtoReflect.Descriptor instead. +func (*BlockList) Descriptor() ([]byte, []int) { + return file_metastore_v1_types_proto_rawDescGZIP(), []int{0} +} + +func (x *BlockList) GetTenant() string { + if x != nil { + return x.Tenant + } + return "" +} + +func (x *BlockList) GetShard() uint32 { + if x != nil { + return x.Shard + } + return 0 +} + +func (x *BlockList) GetBlocks() []string { + if x != nil { + return x.Blocks + } + return nil +} + type BlockMeta struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -42,7 +105,7 @@ type BlockMeta struct { func (x *BlockMeta) Reset() { *x = BlockMeta{} if protoimpl.UnsafeEnabled { - mi := &file_metastore_v1_types_proto_msgTypes[0] + mi := &file_metastore_v1_types_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -55,7 +118,7 @@ func (x *BlockMeta) String() string { func (*BlockMeta) ProtoMessage() {} func (x *BlockMeta) ProtoReflect() protoreflect.Message { - mi := &file_metastore_v1_types_proto_msgTypes[0] + mi := &file_metastore_v1_types_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -68,7 +131,7 @@ func (x *BlockMeta) ProtoReflect() protoreflect.Message { // Deprecated: Use BlockMeta.ProtoReflect.Descriptor instead. func (*BlockMeta) Descriptor() ([]byte, []int) { - return file_metastore_v1_types_proto_rawDescGZIP(), []int{0} + return file_metastore_v1_types_proto_rawDescGZIP(), []int{1} } func (x *BlockMeta) GetFormatVersion() uint64 { @@ -170,7 +233,7 @@ type Dataset struct { func (x *Dataset) Reset() { *x = Dataset{} if protoimpl.UnsafeEnabled { - mi := &file_metastore_v1_types_proto_msgTypes[1] + mi := &file_metastore_v1_types_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -183,7 +246,7 @@ func (x *Dataset) String() string { func (*Dataset) ProtoMessage() {} func (x *Dataset) ProtoReflect() protoreflect.Message { - mi := &file_metastore_v1_types_proto_msgTypes[1] + mi := &file_metastore_v1_types_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -196,7 +259,7 @@ func (x *Dataset) ProtoReflect() protoreflect.Message { // Deprecated: Use Dataset.ProtoReflect.Descriptor instead. 
func (*Dataset) Descriptor() ([]byte, []int) { - return file_metastore_v1_types_proto_rawDescGZIP(), []int{1} + return file_metastore_v1_types_proto_rawDescGZIP(), []int{2} } func (x *Dataset) GetTenantId() string { @@ -261,56 +324,61 @@ var file_metastore_v1_types_proto_rawDesc = []byte{ 0x0a, 0x18, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x14, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, - 0x76, 0x31, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbc, - 0x02, 0x0a, 0x09, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, - 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x69, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x19, - 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x07, 0x6d, 0x61, 0x78, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, - 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, - 0x76, 0x65, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x65, - 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, - 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x31, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x73, - 0x65, 0x74, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x65, 0x74, 0x61, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, - 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, - 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1d, - 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x79, 0x22, 0xff, 0x01, - 0x0a, 0x07, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x65, 0x6e, - 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x65, - 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x69, - 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x69, - 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x54, 0x69, 0x6d, 0x65, - 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x5f, 0x63, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x04, 0x52, 0x0f, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x4f, 0x66, 0x43, 
0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, - 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, - 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, - 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x76, 0x31, - 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x42, - 0xb7, 0x01, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, - 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x54, 0x79, 0x70, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, - 0x72, 0x61, 0x66, 0x61, 0x6e, 0x61, 0x2f, 0x70, 0x79, 0x72, 0x6f, 0x73, 0x63, 0x6f, 0x70, 0x65, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, - 0x6f, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x6d, - 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x4d, 0x58, 0x58, - 0xaa, 0x02, 0x0c, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x56, 0x31, 0xca, - 0x02, 0x0c, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5c, 0x56, 0x31, 0xe2, 0x02, - 0x18, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5c, 0x56, 0x31, 0x5c, 0x47, 0x50, - 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0d, 0x4d, 0x65, 0x74, 0x61, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x76, 0x31, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x51, + 0x0a, 0x09, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x74, + 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x65, 0x6e, + 0x61, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x73, 0x22, 0xbc, 0x02, 0x0a, 0x09, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x12, + 0x25, 0x0a, 0x0e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x63, 0x6f, + 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 
0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x1b, 0x0a, + 0x09, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x31, 0x0a, 0x08, 0x64, 0x61, + 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, + 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, + 0x73, 0x65, 0x74, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x12, 0x12, 0x0a, + 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x69, 0x7a, + 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x79, + 0x22, 0xff, 0x01, 0x0a, 0x07, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, + 0x08, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x07, 0x6d, 0x69, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x54, + 0x69, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x04, 0x52, 0x0f, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4f, 0x66, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x12, + 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, + 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x66, + 0x69, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x42, 0xb7, 0x01, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x2e, 0x6d, 0x65, 0x74, 0x61, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x54, 0x79, 0x70, 0x65, 0x73, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x72, 0x61, 0x66, 0x61, 0x6e, 0x61, 0x2f, 0x70, 0x79, 0x72, 0x6f, 0x73, 0x63, + 0x6f, 0x70, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x76, + 0x31, 0x3b, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x76, 0x31, 0xa2, 0x02, 0x03, + 0x4d, 0x58, 0x58, 0xaa, 0x02, 0x0c, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2e, + 0x56, 0x31, 0xca, 0x02, 0x0c, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5c, 0x56, + 0x31, 0xe2, 0x02, 0x18, 0x4d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5c, 0x56, 0x31, + 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0d, 0x4d, + 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x3a, 0x56, 0x31, 
0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -325,15 +393,16 @@ func file_metastore_v1_types_proto_rawDescGZIP() []byte { return file_metastore_v1_types_proto_rawDescData } -var file_metastore_v1_types_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_metastore_v1_types_proto_msgTypes = make([]protoimpl.MessageInfo, 3) var file_metastore_v1_types_proto_goTypes = []any{ - (*BlockMeta)(nil), // 0: metastore.v1.BlockMeta - (*Dataset)(nil), // 1: metastore.v1.Dataset - (*v1.Labels)(nil), // 2: types.v1.Labels + (*BlockList)(nil), // 0: metastore.v1.BlockList + (*BlockMeta)(nil), // 1: metastore.v1.BlockMeta + (*Dataset)(nil), // 2: metastore.v1.Dataset + (*v1.Labels)(nil), // 3: types.v1.Labels } var file_metastore_v1_types_proto_depIdxs = []int32{ - 1, // 0: metastore.v1.BlockMeta.datasets:type_name -> metastore.v1.Dataset - 2, // 1: metastore.v1.Dataset.labels:type_name -> types.v1.Labels + 2, // 0: metastore.v1.BlockMeta.datasets:type_name -> metastore.v1.Dataset + 3, // 1: metastore.v1.Dataset.labels:type_name -> types.v1.Labels 2, // [2:2] is the sub-list for method output_type 2, // [2:2] is the sub-list for method input_type 2, // [2:2] is the sub-list for extension type_name @@ -348,7 +417,7 @@ func file_metastore_v1_types_proto_init() { } if !protoimpl.UnsafeEnabled { file_metastore_v1_types_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*BlockMeta); i { + switch v := v.(*BlockList); i { case 0: return &v.state case 1: @@ -360,6 +429,18 @@ func file_metastore_v1_types_proto_init() { } } file_metastore_v1_types_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*BlockMeta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_metastore_v1_types_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Dataset); i { case 0: return &v.state @@ -378,7 +459,7 @@ func file_metastore_v1_types_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_metastore_v1_types_proto_rawDesc, NumEnums: 0, - NumMessages: 2, + NumMessages: 3, NumExtensions: 0, NumServices: 0, }, diff --git a/api/gen/proto/go/metastore/v1/types_vtproto.pb.go b/api/gen/proto/go/metastore/v1/types_vtproto.pb.go index 334423610d..8a47bc41f8 100644 --- a/api/gen/proto/go/metastore/v1/types_vtproto.pb.go +++ b/api/gen/proto/go/metastore/v1/types_vtproto.pb.go @@ -20,6 +20,29 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *BlockList) CloneVT() *BlockList { + if m == nil { + return (*BlockList)(nil) + } + r := new(BlockList) + r.Tenant = m.Tenant + r.Shard = m.Shard + if rhs := m.Blocks; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Blocks = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BlockList) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *BlockMeta) CloneVT() *BlockMeta { if m == nil { return (*BlockMeta)(nil) @@ -94,6 +117,37 @@ func (m *Dataset) CloneMessageVT() proto.Message { return m.CloneVT() } +func (this *BlockList) EqualVT(that *BlockList) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Tenant != that.Tenant { + return false + } + if this.Shard != that.Shard { + return false + } + if len(this.Blocks) != len(that.Blocks) { + return false + } + for 
i, vx := range this.Blocks { + vy := that.Blocks[i] + if vx != vy { + return false + } + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *BlockList) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*BlockList) + if !ok { + return false + } + return this.EqualVT(that) +} func (this *BlockMeta) EqualVT(that *BlockMeta) bool { if this == that { return true @@ -224,6 +278,60 @@ func (this *Dataset) EqualMessageVT(thatMsg proto.Message) bool { } return this.EqualVT(that) } +func (m *BlockList) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockList) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *BlockList) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Blocks) > 0 { + for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Blocks[iNdEx]) + copy(dAtA[i:], m.Blocks[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Blocks[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.Shard != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Shard)) + i-- + dAtA[i] = 0x10 + } + if len(m.Tenant) > 0 { + i -= len(m.Tenant) + copy(dAtA[i:], m.Tenant) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Tenant))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *BlockMeta) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -435,6 +543,29 @@ func (m *Dataset) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *BlockList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Tenant) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Shard != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Shard)) + } + if len(m.Blocks) > 0 { + for _, s := range m.Blocks { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + func (m *BlockMeta) SizeVT() (n int) { if m == nil { return 0 @@ -533,6 +664,140 @@ func (m *Dataset) SizeVT() (n int) { return n } +func (m *BlockList) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tenant", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } 
+ intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tenant = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + m.Shard = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Shard |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Blocks = append(m.Blocks, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *BlockMeta) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/api/go.mod b/api/go.mod index edec2d8255..dc8b589398 100644 --- a/api/go.mod +++ b/api/go.mod @@ -16,7 +16,7 @@ require ( require ( golang.org/x/net v0.28.0 // indirect - golang.org/x/sys v0.25.0 // indirect + golang.org/x/sys v0.27.0 // indirect golang.org/x/text v0.19.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect ) diff --git a/api/go.sum b/api/go.sum index ee0b5cb302..999ad87764 100644 --- a/api/go.sum +++ b/api/go.sum @@ -33,8 +33,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= diff --git a/api/metastore/v1/compactor.proto b/api/metastore/v1/compactor.proto index 65e4a01b16..becade9c25 100644 --- a/api/metastore/v1/compactor.proto +++ 
b/api/metastore/v1/compactor.proto @@ -10,85 +10,63 @@ service CompactionService { } message PollCompactionJobsRequest { - // A batch of status updates for in-progress jobs from a worker. - repeated CompactionJobStatus job_status_updates = 1; + repeated CompactionJobStatusUpdate status_updates = 1; // How many new jobs a worker can be assigned to. uint32 job_capacity = 2; } message PollCompactionJobsResponse { repeated CompactionJob compaction_jobs = 1; + repeated CompactionJobAssignment assignments = 2; } -message GetCompactionRequest {} +message CompactionJob { + string name = 1; + uint32 shard = 2; + string tenant = 3; + uint32 compaction_level = 4; + repeated string source_blocks = 5; + repeated Tombstones tombstones = 6; +} -message GetCompactionResponse { - // A list of all compaction jobs - repeated CompactionJob compaction_jobs = 1; +// Tombstones represent objects removed from the index but still stored. +message Tombstones { + BlockTombstones blocks = 1; + // For now, we only have block tombstones created due to the + // compaction process. Later, we may add more types of tombstones, + // e.g, deleted tenant (shard), partition, dataset, series etc. + // Exactly one member of Tombstones should be present. } -// One compaction job may result in multiple output blocks. -message CompactionJob { - // Unique name of the job. +message BlockTombstones { string name = 1; - CompactionOptions options = 2; - // List of the input blocks. - repeated metastore.v1.BlockMeta blocks = 3; - CompactionJobStatus status = 4; - // Fencing token. - uint64 raft_log_index = 5; - // Shard the blocks belong to. - uint32 shard = 6; - // Optional, empty for compaction level 0. - string tenant_id = 7; - uint32 compaction_level = 8; + uint32 shard = 2; + string tenant = 3; + uint32 compaction_level = 4; + repeated string blocks = 5; } -message CompactionOptions { - // Compaction planner should instruct the compactor - // worker how to compact the blocks: - // - Limits and tenant overrides. - // - Feature flags. +message CompactionJobAssignment { + string name = 1; + uint64 token = 2; + int64 lease_expires_at = 3; +} - // How often the compaction worker should update - // the job status. If overdue, the job ownership - // is revoked. - uint64 status_update_interval_seconds = 1; +message CompactionJobStatusUpdate { + string name = 1; + uint64 token = 2; + CompactionJobStatus status = 3; + // Only present if the job completed successfully. + CompactedBlocks compacted_blocks = 4; } -message CompactionJobStatus { - string job_name = 1; - // Status update allows the planner to keep - // track of the job ownership and compaction - // progress: - // - If the job status is other than IN_PROGRESS, - // the ownership of the job is revoked. - // - FAILURE must only be sent if the failure is - // persistent and the compaction can't be accomplished. - // - completed_job must be empty if the status is - // other than SUCCESS, and vice-versa. - // - UNSPECIFIED must be sent if the worker rejects - // or cancels the compaction job. - // - // Partial results/status is not allowed. - CompactionStatus status = 2; - CompletedJob completed_job = 3; - // Fencing token. - uint64 raft_log_index = 4; - // Shard the blocks belong to. - uint32 shard = 5; - // Optional, empty for compaction level 0. 
- string tenant_id = 6; +message CompactedBlocks { + metastore.v1.BlockList source_blocks = 1; + repeated metastore.v1.BlockMeta new_blocks = 2; } -enum CompactionStatus { +enum CompactionJobStatus { COMPACTION_STATUS_UNSPECIFIED = 0; COMPACTION_STATUS_IN_PROGRESS = 1; COMPACTION_STATUS_SUCCESS = 2; - COMPACTION_STATUS_FAILURE = 3; - COMPACTION_STATUS_CANCELLED = 4; -} - -message CompletedJob { - repeated metastore.v1.BlockMeta blocks = 1; } diff --git a/api/metastore/v1/index.proto b/api/metastore/v1/index.proto index cc811a9387..f79bd43b98 100644 --- a/api/metastore/v1/index.proto +++ b/api/metastore/v1/index.proto @@ -6,6 +6,7 @@ import "metastore/v1/types.proto"; service IndexService { rpc AddBlock(AddBlockRequest) returns (AddBlockResponse) {} + rpc GetBlockMetadata(GetBlockMetadataRequest) returns (GetBlockMetadataResponse) {} } message AddBlockRequest { @@ -13,3 +14,11 @@ message AddBlockRequest { } message AddBlockResponse {} + +message GetBlockMetadataRequest { + BlockList blocks = 1; +} + +message GetBlockMetadataResponse { + repeated BlockMeta blocks = 1; +} diff --git a/api/metastore/v1/raft_log/raft_log.proto b/api/metastore/v1/raft_log/raft_log.proto index 56436bbd8e..13b4830d06 100644 --- a/api/metastore/v1/raft_log/raft_log.proto +++ b/api/metastore/v1/raft_log/raft_log.proto @@ -2,13 +2,104 @@ syntax = "proto3"; package raft_log; +import "metastore/v1/compactor.proto"; +import "metastore/v1/types.proto"; + enum RaftCommand { RAFT_COMMAND_UNKNOWN = 0; - RAFT_COMMAND_ADD_BLOCK = 1; - RAFT_COMMAND_POLL_COMPACTION_JOBS = 2; - RAFT_COMMAND_CLEAN_BLOCKS = 3; + RAFT_COMMAND_ADD_BLOCK_METADATA = 1; + RAFT_COMMAND_GET_COMPACTION_PLAN_UPDATE = 2; + RAFT_COMMAND_UPDATE_COMPACTION_PLAN = 3; +} + +message AddBlockMetadataRequest { + metastore.v1.BlockMeta metadata = 1; +} + +message AddBlockMetadataResponse {} + +// GetCompactionPlanUpdateRequest requests CompactionPlanUpdate. +// The resulting plan should be proposed to the raft members. +// This is a read-only operation: it MUST NOT alter the state. +message GetCompactionPlanUpdateRequest { + // CompactionJobStatusUpdate is a change + // requested by the compaction worker. + repeated CompactionJobStatusUpdate status_updates = 1; + uint32 assign_jobs_max = 2; +} + +message CompactionJobStatusUpdate { + string name = 1; + uint64 token = 2; + metastore.v1.CompactionJobStatus status = 3; +} + +// GetCompactionPlanUpdateResponse includes the planned change. +// The plan should be proposed to the raft members. +message GetCompactionPlanUpdateResponse { + uint64 term = 1; + CompactionPlanUpdate plan_update = 2; +} + +message CompactionPlanUpdate { + repeated NewCompactionJob new_jobs = 1; + repeated AssignedCompactionJob assigned_jobs = 2; + repeated UpdatedCompactionJob updated_jobs = 3; + repeated CompletedCompactionJob completed_jobs = 4; +} + +message NewCompactionJob { + CompactionJobState state = 1; + CompactionJobPlan plan = 2; +} + +message AssignedCompactionJob { + CompactionJobState state = 1; + CompactionJobPlan plan = 2; +} + +message UpdatedCompactionJob { + CompactionJobState state = 1; +} + +message CompletedCompactionJob { + CompactionJobState state = 1; + metastore.v1.CompactedBlocks compacted_blocks = 2; +} + +// CompactionJobState is produced in response to +// the compaction worker status update request. +// +// Compaction level and other attributes that +// affect the scheduling order or status update +// handling should be included into the message. 
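+// The token, lease deadline, and failure count are used by the scheduler
+// to track job ownership and retries.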
+message CompactionJobState { + string name = 1; + uint32 compaction_level = 2; + metastore.v1.CompactionJobStatus status = 3; + uint64 token = 4; + int64 lease_expires_at = 5; + int64 added_at = 6; + uint32 failures = 7; +} + +message CompactionJobPlan { + string name = 1; + // Blocks to be compacted. + string tenant = 2; + uint32 shard = 3; + uint32 compaction_level = 4; + repeated string source_blocks = 5; + // Objects to be deleted. + repeated metastore.v1.Tombstones tombstones = 6; +} + +// UpdateCompactionPlanRequest proposes compaction plan changes. +message UpdateCompactionPlanRequest { + uint64 term = 1; + CompactionPlanUpdate plan_update = 2; } -message CleanBlocksRequest { - string request_id = 1; +message UpdateCompactionPlanResponse { + CompactionPlanUpdate plan_update = 1; } diff --git a/api/metastore/v1/types.proto b/api/metastore/v1/types.proto index 902f6d9e34..8ce57d1b4e 100644 --- a/api/metastore/v1/types.proto +++ b/api/metastore/v1/types.proto @@ -4,6 +4,12 @@ package metastore.v1; import "types/v1/types.proto"; +message BlockList { + string tenant = 1; + uint32 shard = 2; + repeated string blocks = 3; +} + message BlockMeta { uint64 format_version = 1; string id = 2; diff --git a/api/openapiv2/gen/phlare.swagger.json b/api/openapiv2/gen/phlare.swagger.json index 1198fe25e2..80d52beb6d 100644 --- a/api/openapiv2/gen/phlare.swagger.json +++ b/api/openapiv2/gen/phlare.swagger.json @@ -353,6 +353,25 @@ } } }, + "metastorev1CompactionJobStatusUpdate": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "token": { + "type": "string", + "format": "uint64" + }, + "status": { + "$ref": "#/definitions/v1CompactionJobStatus" + }, + "compactedBlocks": { + "$ref": "#/definitions/v1CompactedBlocks", + "description": "Only present if the job completed successfully." + } + } + }, "protobufAny": { "type": "object", "properties": { @@ -561,6 +580,24 @@ } } }, + "v1BlockList": { + "type": "object", + "properties": { + "tenant": { + "type": "string" + }, + "shard": { + "type": "integer", + "format": "int64" + }, + "blocks": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, "v1BlockMeta": { "type": "object", "properties": { @@ -649,6 +686,31 @@ } } }, + "v1BlockTombstones": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "shard": { + "type": "integer", + "format": "int64" + }, + "tenant": { + "type": "string" + }, + "compactionLevel": { + "type": "integer", + "format": "int64" + }, + "blocks": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, "v1CommitAuthor": { "type": "object", "properties": { @@ -687,111 +749,78 @@ } } }, - "v1CompactionJob": { + "v1CompactedBlocks": { "type": "object", "properties": { - "name": { - "type": "string", - "description": "Unique name of the job." - }, - "options": { - "$ref": "#/definitions/v1CompactionOptions" + "sourceBlocks": { + "$ref": "#/definitions/v1BlockList" }, - "blocks": { + "newBlocks": { "type": "array", "items": { "type": "object", "$ref": "#/definitions/v1BlockMeta" - }, - "description": "List of the input blocks." - }, - "status": { - "$ref": "#/definitions/v1CompactionJobStatus" - }, - "raftLogIndex": { - "type": "string", - "format": "uint64", - "description": "Fencing token." + } + } + } + }, + "v1CompactionJob": { + "type": "object", + "properties": { + "name": { + "type": "string" }, "shard": { "type": "integer", - "format": "int64", - "description": "Shard the blocks belong to." 
+ "format": "int64" }, - "tenantId": { - "type": "string", - "description": "Optional, empty for compaction level 0." + "tenant": { + "type": "string" }, "compactionLevel": { "type": "integer", "format": "int64" + }, + "sourceBlocks": { + "type": "array", + "items": { + "type": "string" + } + }, + "tombstones": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/v1Tombstones" + } } - }, - "description": "One compaction job may result in multiple output blocks." + } }, - "v1CompactionJobStatus": { + "v1CompactionJobAssignment": { "type": "object", "properties": { - "jobName": { + "name": { "type": "string" }, - "status": { - "$ref": "#/definitions/v1CompactionStatus", - "description": "Status update allows the planner to keep\ntrack of the job ownership and compaction\nprogress:\n- If the job status is other than IN_PROGRESS,\n the ownership of the job is revoked.\n- FAILURE must only be sent if the failure is\n persistent and the compaction can't be accomplished.\n- completed_job must be empty if the status is\n other than SUCCESS, and vice-versa.\n- UNSPECIFIED must be sent if the worker rejects\n or cancels the compaction job.\n\nPartial results/status is not allowed." - }, - "completedJob": { - "$ref": "#/definitions/v1CompletedJob" - }, - "raftLogIndex": { + "token": { "type": "string", - "format": "uint64", - "description": "Fencing token." - }, - "shard": { - "type": "integer", - "format": "int64", - "description": "Shard the blocks belong to." + "format": "uint64" }, - "tenantId": { + "leaseExpiresAt": { "type": "string", - "description": "Optional, empty for compaction level 0." + "format": "int64" } } }, - "v1CompactionOptions": { - "type": "object", - "properties": { - "statusUpdateIntervalSeconds": { - "type": "string", - "format": "uint64", - "description": "How often the compaction worker should update\nthe job status. If overdue, the job ownership\nis revoked." - } - }, - "description": "Compaction planner should instruct the compactor\n worker how to compact the blocks:\n - Limits and tenant overrides.\n - Feature flags." - }, - "v1CompactionStatus": { + "v1CompactionJobStatus": { "type": "string", "enum": [ "COMPACTION_STATUS_UNSPECIFIED", "COMPACTION_STATUS_IN_PROGRESS", - "COMPACTION_STATUS_SUCCESS", - "COMPACTION_STATUS_FAILURE", - "COMPACTION_STATUS_CANCELLED" + "COMPACTION_STATUS_SUCCESS" ], "default": "COMPACTION_STATUS_UNSPECIFIED" }, - "v1CompletedJob": { - "type": "object", - "properties": { - "blocks": { - "type": "array", - "items": { - "type": "object", - "$ref": "#/definitions/v1BlockMeta" - } - } - } - }, "v1Dataset": { "type": "object", "properties": { @@ -951,6 +980,18 @@ } } }, + "v1GetBlockMetadataResponse": { + "type": "object", + "properties": { + "blocks": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/v1BlockMeta" + } + } + } + }, "v1GetBlockStatsResponse": { "type": "object", "properties": { @@ -1458,6 +1499,13 @@ "type": "object", "$ref": "#/definitions/v1CompactionJob" } + }, + "assignments": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/v1CompactionJobAssignment" + } } } }, @@ -2131,6 +2179,16 @@ } } }, + "v1Tombstones": { + "type": "object", + "properties": { + "blocks": { + "$ref": "#/definitions/v1BlockTombstones", + "description": "For now, we only have block tombstones created due to the\n compaction process. 
Later, we may add more types of tombstones,\n e.g, deleted tenant (shard), partition, dataset, series etc.\n Exactly one member of Tombstones should be present." + } + }, + "description": "Tombstones represent objects removed from the index but still stored." + }, "v1TreeQuery": { "type": "object", "properties": { diff --git a/ebpf/go.mod b/ebpf/go.mod index 664ca6c4cc..d39fc1fd02 100644 --- a/ebpf/go.mod +++ b/ebpf/go.mod @@ -20,7 +20,7 @@ require ( github.com/samber/lo v1.38.1 github.com/stretchr/testify v1.9.0 github.com/ulikunitz/xz v0.5.12 - golang.org/x/sys v0.25.0 + golang.org/x/sys v0.27.0 ) require ( diff --git a/ebpf/go.sum b/ebpf/go.sum index bf33d1c887..4316b32d8f 100644 --- a/ebpf/go.sum +++ b/ebpf/go.sum @@ -82,8 +82,8 @@ golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= diff --git a/go.mod b/go.mod index 912392cb82..a36b04452c 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/dolthub/swiss v0.2.1 github.com/drone/envsubst v1.0.3 github.com/dustin/go-humanize v1.0.1 - github.com/fatih/color v1.15.0 + github.com/fatih/color v1.18.0 github.com/felixge/fgprof v0.9.4-0.20221116204635-ececf7638e93 github.com/felixge/httpsnoop v1.0.4 github.com/fsnotify/fsnotify v1.7.0 @@ -40,14 +40,14 @@ require ( github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru/v2 v2.0.7 - github.com/hashicorp/raft v1.7.0 + github.com/hashicorp/raft v1.7.2-0.20241119084901-7e8e836fe2e8 github.com/hashicorp/raft-wal v0.4.1 github.com/iancoleman/strcase v0.3.0 github.com/json-iterator/go v1.1.12 github.com/k0kubun/pp/v3 v3.2.0 github.com/klauspost/compress v1.17.10 github.com/kubescape/go-git-url v0.0.27 - github.com/mattn/go-isatty v0.0.19 + github.com/mattn/go-isatty v0.0.20 github.com/minio/minio-go/v7 v7.0.72 github.com/mitchellh/go-wordwrap v1.0.1 github.com/oauth2-proxy/oauth2-proxy/v7 v7.5.1 @@ -86,7 +86,7 @@ require ( golang.org/x/net v0.28.0 golang.org/x/oauth2 v0.21.0 golang.org/x/sync v0.8.0 - golang.org/x/sys v0.25.0 + golang.org/x/sys v0.27.0 golang.org/x/text v0.19.0 golang.org/x/time v0.6.0 gonum.org/v1/plot v0.14.0 @@ -179,14 +179,14 @@ require ( github.com/hashicorp/consul/api v1.28.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-hclog v1.6.2 // indirect + github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-msgpack v1.1.5 // indirect - github.com/hashicorp/go-msgpack/v2 v2.1.1 // indirect + github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect 
github.com/hashicorp/go-sockaddr v1.0.6 // indirect github.com/hashicorp/go-version v1.6.0 // indirect - github.com/hashicorp/golang-lru v0.6.0 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/memberlist v0.5.0 // indirect github.com/hashicorp/serf v0.10.1 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect diff --git a/go.sum b/go.sum index d3bc6cb61f..5a0df567c1 100644 --- a/go.sum +++ b/go.sum @@ -230,8 +230,8 @@ github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/fgprof v0.9.4-0.20221116204635-ececf7638e93 h1:S8ZdFFDRXUKs3fHpMDPVh9oWd46hKqEEt/X3oxhtF5Q= github.com/felixge/fgprof v0.9.4-0.20221116204635-ececf7638e93/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= @@ -439,16 +439,16 @@ github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= -github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs= github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= -github.com/hashicorp/go-msgpack/v2 v2.1.1 h1:xQEY9yB2wnHitoSzk/B9UjXWRQ67QKu5AOm8aFp8N3I= -github.com/hashicorp/go-msgpack/v2 v2.1.1/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= +github.com/hashicorp/go-msgpack/v2 v2.1.2 h1:4Ee8FTp834e+ewB71RDrQ0VKpyFdrKOjvYtnQ/ltVj0= +github.com/hashicorp/go-msgpack/v2 v2.1.2/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= @@ -470,16 +470,16 @@ github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mO 
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= -github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702 h1:fI1LXuBaS1d9z1kmb++Og6YD8uMRwadXorCwE+xgOFA= github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702/go.mod h1:z71gkJdrkAt/Rl6C7Q79VE7AwJ5lUF+M+fzFTyIHYB0= -github.com/hashicorp/raft v1.7.0 h1:4u24Qn6lQ6uwziM++UgsyiT64Q8GyRn43CV41qPiz1o= -github.com/hashicorp/raft v1.7.0/go.mod h1:N1sKh6Vn47mrWvEArQgILTyng8GoDRNYlgKyK7PMjs0= +github.com/hashicorp/raft v1.7.2-0.20241119084901-7e8e836fe2e8 h1:d2HabIDMkwzIKw+w82mZYelwMy4giCbpX4mjDQxmeuk= +github.com/hashicorp/raft v1.7.2-0.20241119084901-7e8e836fe2e8/go.mod h1:hUeiEwQQR/Nk2iKDD0dkEhklSsu3jcAcqvPzPoZSAEM= github.com/hashicorp/raft-wal v0.4.1 h1:aU8XZ6x8R9BAIB/83Z1dTDtXvDVmv9YVYeXxd/1QBSA= github.com/hashicorp/raft-wal v0.4.1/go.mod h1:A6vP5o8hGOs1LHfC1Okh9xPwWDcmb6Vvuz/QyqUXlOE= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= @@ -560,8 +560,8 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= @@ -983,8 +983,8 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= diff --git a/pkg/experiment/compactor/compaction_worker.go b/pkg/experiment/compactor/compaction_worker.go index 3ba80cba65..2acf15a418 100644 --- a/pkg/experiment/compactor/compaction_worker.go +++ b/pkg/experiment/compactor/compaction_worker.go @@ -6,263 +6,370 @@ import ( "fmt" "os" "path/filepath" - "runtime" - "runtime/debug" + "strconv" + "strings" "sync" + "sync/atomic" "time" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/grafana/dskit/services" + "github.com/oklog/ulid" "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sync/errgroup" metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" - "github.com/grafana/pyroscope/pkg/experiment/metastore/client" "github.com/grafana/pyroscope/pkg/experiment/query_backend/block" "github.com/grafana/pyroscope/pkg/objstore" - _ "go.uber.org/automaxprocs" + "github.com/grafana/pyroscope/pkg/util" ) type Worker struct { - *services.BasicService - - config Config - logger log.Logger - metastoreClient *metastoreclient.Client - storage objstore.Bucket - metrics *compactionWorkerMetrics - - jobMutex sync.RWMutex - pendingJobs map[string]*metastorev1.CompactionJob - activeJobs map[string]*metastorev1.CompactionJob - completedJobs map[string]*metastorev1.CompactionJobStatus - - queue chan *metastorev1.CompactionJob - wg sync.WaitGroup + service services.Service + + logger log.Logger + config Config + client MetastoreClient + storage objstore.Bucket + metrics *metrics + + jobs map[string]*compactionJob + queue chan *compactionJob + threads int + capacity atomic.Int32 + + stopped atomic.Bool + closeOnce sync.Once + wg sync.WaitGroup } type Config struct { - JobConcurrency int `yaml:"job_capacity"` - JobPollInterval time.Duration `yaml:"job_poll_interval"` - SmallObjectSize int `yaml:"small_object_size_bytes"` - TempDir string `yaml:"temp_dir"` + JobConcurrency util.ConcurrencyLimit `yaml:"job_capacity"` + JobPollInterval time.Duration `yaml:"job_poll_interval"` + SmallObjectSize int `yaml:"small_object_size_bytes"` + TempDir string `yaml:"temp_dir"` + RequestTimeout time.Duration `yaml:"request_timeout"` } func (cfg *Config) RegisterFlags(f *flag.FlagSet) { const prefix = "compaction-worker." tempdir := filepath.Join(os.TempDir(), "pyroscope-compactor") - f.IntVar(&cfg.JobConcurrency, prefix+"job-concurrency", 1, "How many concurrent jobs will a compaction worker run at most.") - f.DurationVar(&cfg.JobPollInterval, prefix+"job-poll-interval", 5*time.Second, "How often will a compaction worker poll for jobs.") + f.TextVar(&cfg.JobConcurrency, prefix+"job-concurrency", util.GoMaxProcsConcurrencyLimit(), "Number of concurrent jobs compaction worker will run. 
Defaults to the number of CPU cores.") + f.DurationVar(&cfg.JobPollInterval, prefix+"job-poll-interval", 5*time.Second, "Interval between job requests") + f.DurationVar(&cfg.RequestTimeout, prefix+"request-timeout", 5*time.Second, "Job request timeout.") f.IntVar(&cfg.SmallObjectSize, prefix+"small-object-size-bytes", 8<<20, "Size of the object that can be loaded in memory.") f.StringVar(&cfg.TempDir, prefix+"temp-dir", tempdir, "Temporary directory for compaction jobs.") } -func (cfg *Config) Validate() error { - // TODO(kolesnikovae): implement. - return nil +type compactionJob struct { + *metastorev1.CompactionJob + + ctx context.Context + cancel context.CancelFunc + done atomic.Bool + + blocks []*metastorev1.BlockMeta + assignment *metastorev1.CompactionJobAssignment + compacted *metastorev1.CompactedBlocks } -func New(config Config, logger log.Logger, metastoreClient *metastoreclient.Client, storage objstore.Bucket, reg prometheus.Registerer) (*Worker, error) { - workers := runtime.GOMAXPROCS(-1) * config.JobConcurrency +type MetastoreClient interface { + metastorev1.CompactionServiceClient + metastorev1.IndexServiceClient +} + +func New( + logger log.Logger, + config Config, + client MetastoreClient, + storage objstore.Bucket, + reg prometheus.Registerer, +) (*Worker, error) { w := &Worker{ - config: config, - logger: logger, - metastoreClient: metastoreClient, - storage: storage, - pendingJobs: make(map[string]*metastorev1.CompactionJob), - activeJobs: make(map[string]*metastorev1.CompactionJob), - completedJobs: make(map[string]*metastorev1.CompactionJobStatus), - metrics: newMetrics(reg), - queue: make(chan *metastorev1.CompactionJob, workers), + config: config, + logger: logger, + client: client, + storage: storage, + metrics: newMetrics(reg), } - w.BasicService = services.NewBasicService(w.starting, w.running, w.stopping) + w.threads = int(config.JobConcurrency) + w.queue = make(chan *compactionJob, 2*w.threads) + w.jobs = make(map[string]*compactionJob, 2*w.threads) + w.capacity.Store(int32(w.threads)) + w.service = services.NewBasicService(w.starting, w.running, w.stopping) return w, nil } -func (w *Worker) starting(ctx context.Context) (err error) { - return nil -} +func (w *Worker) Service() services.Service { return w.service } + +func (w *Worker) starting(context.Context) (err error) { return nil } + +func (w *Worker) stopping(error) error { return nil } func (w *Worker) running(ctx context.Context) error { ticker := time.NewTicker(w.config.JobPollInterval) - defer ticker.Stop() - for i := 0; i < cap(w.queue); i++ { - w.wg.Add(1) + stopPolling := make(chan struct{}) + pollingDone := make(chan struct{}) + go func() { + defer close(pollingDone) + for { + select { + case <-stopPolling: + return + case <-ticker.C: + w.poll() + } + } + }() + + w.wg.Add(w.threads) + for i := 0; i < w.threads; i++ { go func() { defer w.wg.Done() - w.jobsLoop(ctx) + level.Info(w.logger).Log("msg", "compaction worker thread started") + for job := range w.queue { + w.capacity.Add(-1) + util.Recover(func() { w.runCompaction(job) }) + job.done.Store(true) + w.capacity.Add(1) + } }() } - for { - select { - case <-ticker.C: - w.poll(ctx) + <-ctx.Done() + // Wait for all threads to finish their work, continuing to report status + // updates about the in-progress jobs. First, signal to the poll loop that + // we're done with new jobs. + w.stopped.Store(true) + level.Info(w.logger).Log("msg", "waiting for all jobs to finish") + w.wg.Wait() + + // Now that all the threads are done, we stop the polling loop. 
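+	// Closing stopPolling unblocks the polling goroutine, and pollingDone
+	// lets us wait for any in-flight poll to finish before returning.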
+ ticker.Stop() + close(stopPolling) + <-pollingDone + return nil +} - case <-ctx.Done(): - w.wg.Wait() - return nil +func (w *Worker) poll() { + // Check if we want to stop polling for new jobs. + // Close the queue if this is not the case. + var capacity uint32 + if w.stopped.Load() { + w.closeOnce.Do(func() { + level.Info(w.logger).Log("msg", "closing job queue") + close(w.queue) + }) + } else { + // We report the number of free workers in a hope to get more jobs. + // Note that cap(w.queue) - len(w.queue) will only report 0 when all + // the workers are busy and the queue is full (in fact, doubling the + // reported capacity). + if c := w.capacity.Load(); c > 0 { + capacity = uint32(c) } } -} -func (w *Worker) jobsLoop(ctx context.Context) { - for { + updates := w.collectUpdates() + if len(updates) == 0 && capacity == 0 { + level.Info(w.logger).Log("msg", "skipping polling", "updates", len(updates), "capacity", capacity) + return + } + + level.Info(w.logger).Log("msg", "polling compaction jobs", "updates", len(updates), "capacity", capacity) + ctx, cancel := context.WithTimeout(context.Background(), w.config.RequestTimeout) + defer cancel() + resp, err := w.client.PollCompactionJobs(ctx, &metastorev1.PollCompactionJobsRequest{ + StatusUpdates: updates, + JobCapacity: capacity, + }) + if err != nil { + level.Error(w.logger).Log("msg", "failed to poll compaction jobs", "err", err) + return + } + + w.cleanup(updates) + newJobs := w.handleResponse(resp) + for _, job := range newJobs { select { - case <-ctx.Done(): - return - - case job := <-w.queue: - w.jobMutex.Lock() - delete(w.pendingJobs, job.Name) - w.activeJobs[job.Name] = job - w.jobMutex.Unlock() - - _ = level.Info(w.logger).Log("msg", "starting compaction job", "job", job.Name) - status := w.startJob(ctx, job) - _ = level.Info(w.logger).Log("msg", "compaction job finished", "job", job.Name) - - w.jobMutex.Lock() - delete(w.activeJobs, job.Name) - w.completedJobs[job.Name] = status - w.jobMutex.Unlock() + case w.queue <- job: + default: + level.Warn(w.logger).Log("msg", "dropping job", "job_name", job.Name) + w.remove(job) } } } -func (w *Worker) poll(ctx context.Context) { - w.jobMutex.Lock() - level.Debug(w.logger).Log( - "msg", "polling for compaction jobs and status updates", - "active_jobs", len(w.activeJobs), - "pending_jobs", len(w.pendingJobs), - "pending_updates", len(w.completedJobs)) - - pendingStatusUpdates := make([]*metastorev1.CompactionJobStatus, 0, len(w.completedJobs)) - for _, update := range w.completedJobs { - level.Debug(w.logger).Log("msg", "completed job update", "job", update.JobName, "status", update.Status) - pendingStatusUpdates = append(pendingStatusUpdates, update) - } - for _, activeJob := range w.activeJobs { - level.Debug(w.logger).Log("msg", "in progress job update", "job", activeJob.Name) - update := activeJob.Status.CloneVT() - update.Status = metastorev1.CompactionStatus_COMPACTION_STATUS_IN_PROGRESS - pendingStatusUpdates = append(pendingStatusUpdates, update) - } - for _, pendingJob := range w.pendingJobs { - level.Debug(w.logger).Log("msg", "pending job update", "job", pendingJob.Name) - update := pendingJob.Status.CloneVT() - update.Status = metastorev1.CompactionStatus_COMPACTION_STATUS_IN_PROGRESS - pendingStatusUpdates = append(pendingStatusUpdates, update) +func (w *Worker) collectUpdates() []*metastorev1.CompactionJobStatusUpdate { + updates := make([]*metastorev1.CompactionJobStatusUpdate, 0, len(w.jobs)) + for _, job := range w.jobs { + update := 
&metastorev1.CompactionJobStatusUpdate{ + Name: job.Name, + Token: job.assignment.Token, + } + + switch done := job.done.Load(); { + case done && job.compacted != nil: + level.Info(w.logger).Log("msg", "sending update for completed job", "job", job.Name) + update.Status = metastorev1.CompactionJobStatus_COMPACTION_STATUS_SUCCESS + update.CompactedBlocks = job.compacted + updates = append(updates, update) + + case done && job.compacted == nil: + // We're not sending a status update for the job and expect that the + // assigment is to be revoked. The job is to be removed at the next + // poll response handling: all jobs without assignments are canceled + // and removed. + level.Warn(w.logger).Log("msg", "skipping update for abandoned job", "job", job.Name) + + default: + level.Info(w.logger).Log("msg", "sending update for in-progress job", "job", job.Name) + update.Status = metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS + updates = append(updates, update) + } } - jobCapacity := cap(w.queue) - len(w.queue) - w.jobMutex.Unlock() + return updates +} - if len(pendingStatusUpdates) > 0 || jobCapacity > 0 { - jobsResponse, err := w.metastoreClient.PollCompactionJobs(ctx, &metastorev1.PollCompactionJobsRequest{ - JobStatusUpdates: pendingStatusUpdates, - JobCapacity: uint32(jobCapacity), - }) +func (w *Worker) cleanup(updates []*metastorev1.CompactionJobStatusUpdate) { + for _, update := range updates { + if job := w.jobs[update.Name]; job != nil && job.done.Load() { + switch update.Status { + case metastorev1.CompactionJobStatus_COMPACTION_STATUS_SUCCESS: + // In the vast majority of cases, we end up here. + w.remove(job) + + case metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS: + // It is possible that the job has been completed after we + // prepared the status update: keep the job for the next + // poll iteration. - if err != nil { - level.Error(w.logger).Log("msg", "failed to poll compaction jobs", "err", err) - return + default: + // Workers never send other statuses. It's unexpected to get here. + level.Warn(w.logger).Log("msg", "unexpected job status transition; removing the job", "job", job.Name) + w.remove(job) + } } + } +} - level.Debug(w.logger).Log("msg", "poll response received", "compaction_jobs", len(jobsResponse.CompactionJobs)) +func (w *Worker) remove(job *compactionJob) { + delete(w.jobs, job.Name) + job.cancel() +} - pendingJobs := make([]*metastorev1.CompactionJob, 0, len(jobsResponse.CompactionJobs)) - for _, job := range jobsResponse.CompactionJobs { - pendingJobs = append(pendingJobs, job.CloneVT()) - } +func (w *Worker) handleResponse(resp *metastorev1.PollCompactionJobsResponse) (newJobs []*compactionJob) { + // Assignments by job name. + assignments := make(map[string]*metastorev1.CompactionJobAssignment, len(resp.Assignments)) + for _, assignment := range resp.Assignments { + assignments[assignment.Name] = assignment + } - w.jobMutex.Lock() - for _, update := range pendingStatusUpdates { - delete(w.completedJobs, update.JobName) - } - for _, job := range pendingJobs { - w.pendingJobs[job.Name] = job + for _, job := range w.jobs { + if assignment, ok := assignments[job.assignment.Name]; ok { + // In theory, we should respect the lease expiration time. + // In practice, we have a static polling interval. + job.assignment = assignment + } else { + // The job is running without an assigment. + // We don't care how and when it ends. 
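+			// Removing the job cancels its context, which aborts the in-flight
+			// compaction; no further status update is sent for it.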
+ level.Warn(w.logger).Log("msg", "job re-assigned to another worker; cancelling", "job", job.Name) + w.remove(job) } - w.jobMutex.Unlock() + } - for _, job := range pendingJobs { - select { - case w.queue <- job: - default: - level.Warn(w.logger).Log("msg", "dropping job", "job_name", job.Name) - w.jobMutex.Lock() - delete(w.pendingJobs, job.Name) - w.jobMutex.Unlock() + for _, newJob := range resp.CompactionJobs { + if running, found := w.jobs[newJob.Name]; found { + level.Warn(w.logger).Log("msg", "job re-assigned to the same worker", "job", running.Name) + // We're free to choose what to do. For now, we update the + // assignment (in case the token has changed) and let the + // running job finish. + if running.assignment = assignments[running.Name]; running.assignment != nil { + continue } } + job := &compactionJob{CompactionJob: newJob} + if job.assignment = assignments[newJob.Name]; job.assignment == nil { + // That should not be possible, logging it here just in case. + level.Warn(w.logger).Log("msg", "found a job without assigment", "job", job.Name) + continue + } + job.ctx, job.cancel = context.WithCancel(context.Background()) + newJobs = append(newJobs, job) + w.jobs[job.Name] = job } -} -func (w *Worker) stopping(err error) error { - // TODO aleks: handle shutdown - return nil + return newJobs } -func (w *Worker) startJob(ctx context.Context, job *metastorev1.CompactionJob) *metastorev1.CompactionJobStatus { - jobStartTime := time.Now() - labels := []string{job.TenantId, fmt.Sprint(job.Shard), fmt.Sprint(job.CompactionLevel)} - statusName := "unknown" +func (w *Worker) runCompaction(job *compactionJob) { + start := time.Now() + labels := []string{job.Tenant, strconv.Itoa(int(job.CompactionLevel))} + statusName := "failure" defer func() { - elapsed := time.Since(jobStartTime) - jobStatusLabel := append(labels, statusName) - w.metrics.jobDuration.WithLabelValues(jobStatusLabel...).Observe(elapsed.Seconds()) - w.metrics.jobsCompleted.WithLabelValues(jobStatusLabel...).Inc() + labelsWithStatus := append(labels, statusName) + w.metrics.jobDuration.WithLabelValues(labelsWithStatus...).Observe(time.Since(start).Seconds()) + w.metrics.jobsCompleted.WithLabelValues(labelsWithStatus...).Inc() w.metrics.jobsInProgress.WithLabelValues(labels...).Dec() }() - w.metrics.jobsInProgress.WithLabelValues(labels...).Inc() - sp, ctx := opentracing.StartSpanFromContext(ctx, "StartCompactionJob", + w.metrics.jobsInProgress.WithLabelValues(labels...).Inc() + sp, ctx := opentracing.StartSpanFromContext(job.ctx, "runCompaction", opentracing.Tag{Key: "Job", Value: job.String()}, - opentracing.Tag{Key: "Tenant", Value: job.TenantId}, + opentracing.Tag{Key: "Tenant", Value: job.Tenant}, opentracing.Tag{Key: "Shard", Value: job.Shard}, opentracing.Tag{Key: "CompactionLevel", Value: job.CompactionLevel}, - opentracing.Tag{Key: "BlockCount", Value: len(job.Blocks)}, + opentracing.Tag{Key: "SourceBlocks", Value: len(job.SourceBlocks)}, + opentracing.Tag{Key: "Tombstones", Value: len(job.Tombstones)}, ) defer sp.Finish() - _ = level.Info(w.logger).Log( - "msg", "compacting blocks for job", - "job", job.Name, - "blocks", len(job.Blocks)) + logger := log.With(w.logger, "job", job.Name) + deleteGroup, deleteCtx := errgroup.WithContext(ctx) + for _, t := range job.Tombstones { + if b := t.GetBlocks(); b != nil { + deleteGroup.Go(func() error { + // TODO(kolesnikovae): Clarify guarantees of cleanup. 
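+				// Each tombstone set is deleted in its own goroutine,
+				// in parallel with the compaction itself.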
+ // Currently, we ignore any cleanup failures, as it's unlikely + // that anyone would want to stop compaction due to a failed + // cleanup. However, we should make this behavior configurable: + // if cleanup fails, the entire job should be retried. + w.deleteBlocks(deleteCtx, logger, b) + return nil + }) + } + } + + level.Info(logger).Log( + "msg", "starting compaction job", + "source_blocks", strings.Join(job.SourceBlocks, " "), + ) + if err := w.getBlockMetadata(logger, job); err != nil { + return + } tempdir := filepath.Join(w.config.TempDir, job.Name) sourcedir := filepath.Join(tempdir, "source") - // TODO(kolesnikovae): Return the actual error once we - // can handle compaction failures in metastore. - compacted, err := pretendEverythingIsOK(func() ([]*metastorev1.BlockMeta, error) { - return block.Compact(ctx, job.Blocks, w.storage, - block.WithCompactionTempDir(tempdir), - block.WithCompactionObjectOptions( - block.WithObjectMaxSizeLoadInMemory(w.config.SmallObjectSize), - block.WithObjectDownload(sourcedir), - ), - ) - }) - - logger := log.With(w.logger, - "job_name", job.Name, - "job_shard", job.Shard, - "job_tenant", job.TenantId, - "job_compaction_level", job.CompactionLevel, - ) + compacted, err := block.Compact(ctx, job.blocks, w.storage, + block.WithCompactionTempDir(tempdir), + block.WithCompactionObjectOptions( + block.WithObjectMaxSizeLoadInMemory(w.config.SmallObjectSize), + block.WithObjectDownload(sourcedir), + )) switch { case err == nil: - _ = level.Info(logger).Log( - "msg", "successful compaction for job", - "input_blocks", len(job.Blocks), - "output_blocks", len(compacted)) - + level.Info(logger).Log( + "msg", "compaction finished successfully", + "input_blocks", len(job.SourceBlocks), + "output_blocks", len(compacted), + ) for _, c := range compacted { - _ = level.Info(logger).Log( + level.Info(logger).Log( "msg", "new compacted block", "block_id", c.Id, "block_tenant", c.TenantId, @@ -271,43 +378,88 @@ func (w *Worker) startJob(ctx context.Context, job *metastorev1.CompactionJob) * "block_compaction_level", c.CompactionLevel, "block_min_time", c.MinTime, "block_max_time", c.MinTime, - "datasets", len(c.Datasets)) + "datasets", len(c.Datasets), + ) } - job.Status.Status = metastorev1.CompactionStatus_COMPACTION_STATUS_SUCCESS - job.Status.CompletedJob = &metastorev1.CompletedJob{Blocks: compacted} statusName = "success" + job.compacted = &metastorev1.CompactedBlocks{ + NewBlocks: compacted, + SourceBlocks: &metastorev1.BlockList{ + Tenant: job.Tenant, + Shard: job.Shard, + Blocks: job.SourceBlocks, + }, + } + + firstBlock := time.UnixMilli(int64(ulid.MustParse(job.blocks[0].Id).Time())) + w.metrics.timeToCompaction.WithLabelValues(labels...).Observe(time.Since(firstBlock).Seconds()) case errors.Is(err, context.Canceled): - _ = level.Warn(logger).Log("msg", "job cancelled", "job", job.Name) - job.Status.Status = metastorev1.CompactionStatus_COMPACTION_STATUS_UNSPECIFIED + level.Warn(logger).Log("msg", "job cancelled") statusName = "cancelled" default: - _ = level.Error(logger).Log("msg", "failed to compact blocks", "err", err, "job", job.Name) - job.Status.Status = metastorev1.CompactionStatus_COMPACTION_STATUS_FAILURE - statusName = "failure" + level.Error(logger).Log("msg", "failed to compact blocks", "err", err) } - return job.Status + // The only error returned by Wait is the context + // cancellation error handled above. 
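+	// Waiting here ensures all tombstone deletions have been attempted
+	// before the job is reported as finished.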
+ _ = deleteGroup.Wait() } -func pretendEverythingIsOK(fn func() ([]*metastorev1.BlockMeta, error)) (m []*metastorev1.BlockMeta, err error) { - defer func() { - if r := recover(); r != nil { - fmt.Println("ignoring compaction panic:", r) - fmt.Println(string(debug.Stack())) - m = nil - } - if err != nil { - if errors.Is(err, context.Canceled) { - // We can handle this. - return +func (w *Worker) getBlockMetadata(logger log.Logger, job *compactionJob) error { + ctx, cancel := context.WithTimeout(job.ctx, w.config.RequestTimeout) + defer cancel() + + resp, err := w.client.GetBlockMetadata(ctx, &metastorev1.GetBlockMetadataRequest{ + Blocks: &metastorev1.BlockList{ + Tenant: job.Tenant, + Shard: job.Shard, + Blocks: job.SourceBlocks, + }, + }) + if err != nil { + level.Error(logger).Log("msg", "failed to get block metadata", "err", err) + return err + } + + source := resp.GetBlocks() + if len(source) == 0 { + level.Warn(logger).Log( + "msg", "no block metadata found; skipping", + "blocks", len(job.SourceBlocks), + "blocks_found", len(source), + ) + return fmt.Errorf("no blocks to compact") + } + + // Update the plan to reflect the actual compaction job state. + job.SourceBlocks = job.SourceBlocks[:0] + for _, b := range source { + job.SourceBlocks = append(job.SourceBlocks, b.Id) + } + + job.blocks = source + return nil +} + +func (w *Worker) deleteBlocks(ctx context.Context, logger log.Logger, t *metastorev1.BlockTombstones) { + level.Info(logger).Log( + "msg", "deleting blocks", + "tenant", t.Tenant, + "shard", t.Shard, + "compaction_level", t.CompactionLevel, + "blocks", strings.Join(t.Blocks, " "), + ) + for _, b := range t.Blocks { + path := block.BuildObjectPath(t.Tenant, t.Shard, t.CompactionLevel, b) + if err := w.storage.Delete(ctx, path); err != nil { + if objstore.IsNotExist(w.storage, err) { + level.Warn(logger).Log("msg", "block not found", "path", path, "err", err) + continue } - fmt.Println("ignoring compaction error:", err) - m = nil + level.Warn(logger).Log("msg", "failed to delete block", "path", path, "err", err) } - err = nil - }() - return fn() + } } diff --git a/pkg/experiment/compactor/compaction_worker_metrics.go b/pkg/experiment/compactor/compaction_worker_metrics.go index 122bfc3401..71585791d8 100644 --- a/pkg/experiment/compactor/compaction_worker_metrics.go +++ b/pkg/experiment/compactor/compaction_worker_metrics.go @@ -1,36 +1,59 @@ package compactor -import "github.com/prometheus/client_golang/prometheus" +import ( + "time" -type compactionWorkerMetrics struct { - jobsCompleted *prometheus.CounterVec - jobsInProgress *prometheus.GaugeVec - jobDuration *prometheus.HistogramVec + "github.com/prometheus/client_golang/prometheus" + + "github.com/grafana/pyroscope/pkg/util" +) + +type metrics struct { + jobsInProgress *prometheus.GaugeVec + jobsCompleted *prometheus.CounterVec + jobDuration *prometheus.HistogramVec + timeToCompaction *prometheus.HistogramVec } -func newMetrics(r prometheus.Registerer) *compactionWorkerMetrics { - m := &compactionWorkerMetrics{} - - m.jobsCompleted = prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "pyroscope_compaction_jobs_completed_count", - Help: "Total number of compactions that were executed.", - }, []string{"tenant", "shard", "level", "outcome"}) - m.jobsInProgress = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: "pyroscope_compaction_jobs_current", - Help: "The number of active compaction jobs per level", - }, []string{"tenant", "shard", "level"}) - m.jobDuration = 
prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Name: "pyroscope_compaction_jobs_duration_seconds", - Help: "Duration of compaction job runs", - Buckets: prometheus.ExponentialBuckets(1, 2, 14), - }, []string{"tenant", "shard", "level", "outcome"}) - - if r != nil { - r.MustRegister( - m.jobsCompleted, - m.jobsInProgress, - m.jobDuration, - ) +func newMetrics(r prometheus.Registerer) *metrics { + m := &metrics{ + jobsInProgress: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "jobs_in_progress", + Help: "The number of active compaction jobs currently running.", + }, []string{"tenant", "level"}), + + jobsCompleted: prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "jobs_completed_total", + Help: "Total number of compaction jobs completed.", + }, []string{"tenant", "level", "status"}), + + jobDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: "job_duration_seconds", + Help: "Duration of compaction job runs", + + Buckets: prometheus.ExponentialBuckets(1, 300, 16), + NativeHistogramBucketFactor: 1.1, + NativeHistogramMaxBucketNumber: 16, + NativeHistogramMinResetDuration: time.Hour, + }, []string{"tenant", "level", "status"}), + + timeToCompaction: prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: "time_to_compaction_seconds", + Help: "The time elapsed since the oldest compacted block was created.", + + Buckets: prometheus.ExponentialBuckets(1, 3600, 16), + NativeHistogramBucketFactor: 1.1, + NativeHistogramMaxBucketNumber: 16, + NativeHistogramMinResetDuration: time.Hour, + }, []string{"tenant", "level"}), } + + util.Register(r, + m.jobsInProgress, + m.jobsCompleted, + m.jobDuration, + m.timeToCompaction, + ) + return m } diff --git a/pkg/experiment/distributor/distributor.go b/pkg/experiment/distributor/distributor.go index 7e56a3c312..05bb757526 100644 --- a/pkg/experiment/distributor/distributor.go +++ b/pkg/experiment/distributor/distributor.go @@ -11,6 +11,7 @@ import ( "github.com/grafana/dskit/ring" "github.com/grafana/pyroscope/pkg/experiment/distributor/placement" + "github.com/grafana/pyroscope/pkg/iter" ) // NOTE(kolesnikovae): Essentially, we do not depend on the dskit/ring and @@ -68,12 +69,23 @@ func (d *Distributor) updateDistribution(r ring.ReadRing, maxAge time.Duration) return nil } +// emptyMapping is returned by distributor if the ring is empty. +// This helps to handle a case when requests arrive before the +// ring is populated (no instances registered). +var emptyMapping = &placement.ShardMapping{ + Instances: iter.NewEmptyIterator[ring.InstanceDesc](), + Shard: 0, +} + func (d *Distributor) distribute(k placement.Key) *placement.ShardMapping { d.mu.RLock() defer d.mu.RUnlock() // Determine the number of shards for the tenant within the available // space, and the dataset shards within the tenant subring. s := len(d.distribution.shards) + if s == 0 { + return emptyMapping + } p := d.placement.Policy(k) tenantSize := p.TenantShards if tenantSize == 0 || tenantSize > s { diff --git a/pkg/experiment/ingester/singlereplica/singlereplica.go b/pkg/experiment/ingester/singlereplica/singlereplica.go deleted file mode 100644 index 4ea150a30d..0000000000 --- a/pkg/experiment/ingester/singlereplica/singlereplica.go +++ /dev/null @@ -1,25 +0,0 @@ -package singlereplica - -import ( - "time" - - "github.com/grafana/dskit/ring" -) - -// The replication strategy that returns all the instances, regardless -// of their health and placement to allow the caller to decide which -// instances to use on its own. 
- -type replicationStrategy struct{} - -func (replicationStrategy) Filter( - instances []ring.InstanceDesc, - _ ring.Operation, - _ int, - _ time.Duration, - _ bool, -) ([]ring.InstanceDesc, int, error) { - return instances, 0, nil -} - -func NewReplicationStrategy() ring.ReplicationStrategy { return replicationStrategy{} } diff --git a/pkg/experiment/metastore/cleaner_raft_handler.go b/pkg/experiment/metastore/cleaner_raft_handler.go deleted file mode 100644 index b6ede62403..0000000000 --- a/pkg/experiment/metastore/cleaner_raft_handler.go +++ /dev/null @@ -1,114 +0,0 @@ -package metastore - -import ( - "context" - "fmt" - "path/filepath" - "sync/atomic" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/hashicorp/raft" - "github.com/prometheus/client_golang/prometheus" - "github.com/thanos-io/objstore" - "go.etcd.io/bbolt" - "golang.org/x/sync/errgroup" - "google.golang.org/protobuf/types/known/anypb" - - "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log" - "github.com/grafana/pyroscope/pkg/experiment/metastore/markers" -) - -type DeletionMarkers interface { - FindExpiredMarkers(now int64) map[string]*markers.BlockRemovalContext - Remove(tx *bbolt.Tx, markers map[string]*markers.BlockRemovalContext) error -} - -type CleanerCommandHandler struct { - logger log.Logger - bucket objstore.Bucket - markers DeletionMarkers - - bucketObjectRemovals *prometheus.CounterVec - - lastRequestId atomic.Pointer[string] -} - -func NewCleanerCommandHandler( - logger log.Logger, - bucket objstore.Bucket, - markers DeletionMarkers, - reg prometheus.Registerer, -) *CleanerCommandHandler { - m := prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "block_cleaner_bucket_removal_count", - Help: "The number of expired blocks that were removed from the bucket", - }, []string{"tenant", "shard"}) - if reg != nil { - reg.MustRegister(m) - } - return &CleanerCommandHandler{ - logger: logger, - bucket: bucket, - markers: markers, - bucketObjectRemovals: m, - } -} - -func (h *CleanerCommandHandler) ExpectRequest(request string) { - h.lastRequestId.Store(&request) -} - -func (h *CleanerCommandHandler) CleanBlocks(tx *bbolt.Tx, cmd *raft.Log, request *raft_log.CleanBlocksRequest) (*anypb.Any, error) { - expired := h.markers.FindExpiredMarkers(cmd.AppendedAt.UnixMilli()) - localRequestID := h.lastRequestId.Load() - level.Info(h.logger).Log( - "msg", "cleaning expired block deletion markers", - "count", len(expired), - "request_id", request.RequestId, - "stored_request_id", localRequestID, - ) - cleanBucket := localRequestID != nil && request.RequestId == *localRequestID - if cleanBucket { - var cnt atomic.Int64 - g, grpCtx := errgroup.WithContext(context.Background()) - for b, removalContext := range expired { - g.Go(func() error { - var key string - if removalContext.Tenant != "" { - key = filepath.Join("blocks", fmt.Sprint(removalContext.Shard), removalContext.Tenant, b, "block.bin") - } else { - key = filepath.Join("segments", fmt.Sprint(removalContext.Shard), "anonymous", b, "block.bin") - } - level.Debug(h.logger).Log( - "msg", "removing block from bucket", - "shard", removalContext.Shard, - "tenant", removalContext.Tenant, - "blockId", b, - "expiryTs", removalContext.ExpiryTs, - "bucket_key", key) - err := h.bucket.Delete(grpCtx, key) - if err != nil { - level.Warn(h.logger).Log( - "msg", "failed to remove block from bucket", - "err", err, - "blockId", b, - "shard", removalContext.Shard, - "tenant", removalContext.Tenant) - // TODO(aleks-p): Detect if the error is 
"object does not exist" or something else. Handle each case appropriately. - return err - } - h.bucketObjectRemovals.WithLabelValues(removalContext.Tenant, fmt.Sprint(removalContext.Shard)).Inc() - cnt.Add(1) - return nil - }) - } - err := g.Wait() - level.Info(h.logger).Log("msg", "finished bucket cleanup", "blocks_removed", cnt.Load()) - if err != nil { - return nil, err - } - return nil, h.markers.Remove(tx, expired) - } - return nil, h.markers.Remove(tx, expired) -} diff --git a/pkg/experiment/metastore/cleaner_service.go b/pkg/experiment/metastore/cleaner_service.go deleted file mode 100644 index 2e0fb02328..0000000000 --- a/pkg/experiment/metastore/cleaner_service.go +++ /dev/null @@ -1,90 +0,0 @@ -package metastore - -import ( - "context" - "crypto/rand" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/oklog/ulid" - - "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log" - "github.com/grafana/pyroscope/pkg/experiment/metastore/fsm" - "github.com/grafana/pyroscope/pkg/experiment/metastore/markers" -) - -type LocalCleaner interface { - ExpectRequest(request string) -} - -type CleanerService struct { - config markers.Config - logger log.Logger - raftLog Raft - local LocalCleaner - - m sync.Mutex - started bool - cancel context.CancelFunc -} - -func NewCleanerService( - logger log.Logger, - config markers.Config, - raftLog Raft, - local LocalCleaner, -) *CleanerService { - return &CleanerService{ - config: config, - logger: logger, - raftLog: raftLog, - local: local, - } -} - -func (svc *CleanerService) Start() { - svc.m.Lock() - defer svc.m.Unlock() - if svc.started { - svc.logger.Log("msg", "cleaner already started") - return - } - ctx, cancel := context.WithCancel(context.Background()) - svc.cancel = cancel - svc.started = true - go svc.runLoop(ctx) - svc.logger.Log("msg", "cleaner started") -} - -func (svc *CleanerService) Stop() { - svc.m.Lock() - defer svc.m.Unlock() - if !svc.started { - svc.logger.Log("msg", "cleaner already stopped") - return - } - svc.cancel() - svc.started = false - svc.logger.Log("msg", "cleaner stopped") -} - -func (svc *CleanerService) runLoop(ctx context.Context) { - t := time.NewTicker(svc.config.CompactedBlocksCleanupInterval) - defer t.Stop() - for { - select { - case <-ctx.Done(): - return - case <-t.C: - requestID := ulid.MustNew(ulid.Now(), rand.Reader).String() - svc.local.ExpectRequest(requestID) - req := &raft_log.CleanBlocksRequest{RequestId: requestID} - _, err := svc.raftLog.Propose(fsm.RaftLogEntryType(raft_log.RaftCommand_RAFT_COMMAND_CLEAN_BLOCKS), req) - if err != nil { - level.Error(svc.logger).Log("msg", "failed to apply clean blocks command", "err", err) - } - } - } -} diff --git a/pkg/experiment/metastore/client/methods.go b/pkg/experiment/metastore/client/methods.go index 158689b1be..e6d7bbf027 100644 --- a/pkg/experiment/metastore/client/methods.go +++ b/pkg/experiment/metastore/client/methods.go @@ -90,6 +90,12 @@ func (c *Client) AddBlock(ctx context.Context, in *metastorev1.AddBlockRequest, }) } +func (c *Client) GetBlockMetadata(ctx context.Context, in *metastorev1.GetBlockMetadataRequest, opts ...grpc.CallOption) (*metastorev1.GetBlockMetadataResponse, error) { + return invoke(ctx, c, func(ctx context.Context, instance instance) (*metastorev1.GetBlockMetadataResponse, error) { + return instance.GetBlockMetadata(ctx, in, opts...) 
+ }) +} + func (c *Client) QueryMetadata(ctx context.Context, in *metastorev1.QueryMetadataRequest, opts ...grpc.CallOption) (*metastorev1.QueryMetadataResponse, error) { return invoke(ctx, c, func(ctx context.Context, instance instance) (*metastorev1.QueryMetadataResponse, error) { return instance.QueryMetadata(ctx, in, opts...) diff --git a/pkg/experiment/metastore/client/server_mock_test.go b/pkg/experiment/metastore/client/server_mock_test.go index 2a2066c889..4f39e71f3c 100644 --- a/pkg/experiment/metastore/client/server_mock_test.go +++ b/pkg/experiment/metastore/client/server_mock_test.go @@ -57,6 +57,10 @@ func (m *mockServer) AddBlock(ctx context.Context, request *metastorev1.AddBlock return m.metastore.AddBlock(ctx, request) } +func (m *mockServer) GetBlockMetadata(ctx context.Context, request *metastorev1.GetBlockMetadataRequest) (*metastorev1.GetBlockMetadataResponse, error) { + return m.metastore.GetBlockMetadata(ctx, request) +} + func (m *mockServer) QueryMetadata(ctx context.Context, request *metastorev1.QueryMetadataRequest) (*metastorev1.QueryMetadataResponse, error) { return m.metadata.QueryMetadata(ctx, request) } diff --git a/pkg/experiment/metastore/compaction/README.md b/pkg/experiment/metastore/compaction/README.md new file mode 100644 index 0000000000..cb5413d396 --- /dev/null +++ b/pkg/experiment/metastore/compaction/README.md @@ -0,0 +1,318 @@ +# Pyroscope Compaction Process + +The document introduces the new compaction process design and outlines its implementation. + +## Background + +The compaction approach we currently use assumes that relatively large data blocks are merged into even larger ones, +and the largest blocks are split into shards based on series fingerprints. This approach can lead to uncontrollably high +memory consumption and is only suitable for delayed compaction, when the time range the blocks refer to is protected +from writes ([quiesced](https://en.wikipedia.org/wiki/Quiesce)). Additionally, the compaction algorithm is designed for +deduplication (replica reconciliation), which is not required in [the new ingestion pipeline](../../distributor/README.md#write-path). + +The new Pyroscope ingestion pipeline is designed to gather data in memory as small segments, which are periodically +flushed to object storage, along with the metadata entries being added to the metastore index. Depending on +the configuration and deployment scale, the number of segments created per second can increase significantly, +reaching millions of objects per hour or day. This can lead to performance degradation in the query path due to high +read amplification caused by the large number of small segments. In addition to read amplification, a high number of +metadata entries can also lead to performance degradation across the entire cluster, impacting the write path as well. + +The new background compaction process helps mitigate this by merging small segments into larger ones, aiming to reduce +the number of objects a query needs to fetch from object storage. + +# Compaction Service + +The compaction service is responsible for planning compaction jobs, scheduling their execution, and updating the +metastore index with the results. The compaction service resides within the metastore component, while the compaction +worker is a separate service designed to scale out and in rapidly. + +The compaction service relies on the Raft protocol to guarantee consistency across the replicas. 
The diagram below
+illustrates the interaction between the compaction worker and the compaction service: workers poll the service on a
+regular basis to request new compaction jobs and report status updates.
+
+A status update is processed by the leader node in two steps, each of which is a Raft command committed to the log:
+1. First, the leader prepares the plan update – compaction job state changes based on the reported status updates.
+This is a read-only operation that never modifies the node state.
+2. The leader proposes the plan update: all the replicas must apply the planned changes to their state in an idempotent
+way, if the proposal is accepted (committed to the Raft log).
+
+Critical sections are guaranteed to be executed serially in the context of the Raft state machine and by the same
+leader (within the same *term*), and atomically from the cluster's perspective. If the prepared compaction plan update
+is not accepted by the Raft log, the update plan is discarded, and the new leader will propose a new plan.
+
+The two-step process ensures that all the replicas use the same compaction plan, regardless of their internal state,
+as long as the replicas can apply the `UpdateCompactionPlan` change. This is true even if the compaction algorithm
+(the `GetCompactionPlanUpdate` step) changes across the replicas during an ongoing migration – version upgrade or
+downgrade.
+
+> As of now, both steps are committed to the Raft log. However, as an optimization, the first step (preparation)
+> can be implemented as a **Linearizable Read** through **Read Index** (which we already use in metadata queries)
+> to avoid unnecessary replication of the read-only operation.
+
+```mermaid
+sequenceDiagram
+    participant W as Compaction Worker
+
+    box Compaction Service
+    participant H as Handler
+    participant R as Raft Log
+    end
+
+loop
+
+    W ->>+H: PollCompactionJobsRequest
+    H ->>R: GetCompactionPlanUpdate
+
+    critical FSM state read
+    create participant U as Plan Update
+    R ->>U: 
+    U ->>+S: Job status updates
+    Note right of U: Job ownership is protected with
leases with fencing token + S ->>-U: Job state changes + U ->>+S: Assign jobs + S ->>-U: Job state changes + U ->>+P: Create jobs + Note right of U: New jobs are created if
workers have enough capacity + P ->>P: Dequeue blocks
and load tombstones + P ->>-U: New jobs + U ->>+S: Add jobs + S ->>-U: Job state changes + destroy U + U ->>R: CompactionPlanUpdate + R ->>H: CompactionPlanUpdate + end + + H ->>R: UpdateCompactionPlan + + critical FSM state update + R ->>S: Update schedule
(new, completed, assigned, reassigned jobs) + R ->>P: Remove source blocks from the planner queue (new jobs) + R ->>I: Replace source blocks in the index (completed jobs)
and create tombstones for deleted + I ->>+C: Add new blocks + C ->>C: Enqueue + C ->>-I: + I ->>R: + R ->>H: CompactionPlanUpdate + end + + H ->> W: PollCompactionJobsResponse + +end + + box FSM + participant C as Compactor + participant P as Planner + participant S as Scheduler + participant I as Metadata Index + end +``` + +--- + +# Job Planner + +The compactor is responsible for maintaining a queue of source blocks eligible for compaction. Currently, this queue +is a simple doubly-linked FIFO structure, populated with new block batches as they are added to the index. In the +current implementation, a new compaction job is created once the sufficient number of blocks have been enqueued. +Compaction jobs are planned on demand when requests are received from the compaction service. + +The queue is segmented by the `Tenant`, `Shard`, and `Level` attributes of the block metadata entries, meaning that +a block compaction never crosses these boundaries. This segmentation helps avoid unnecessary compactions of unrelated +blocks. However, the downside is that blocks are never compacted across different shards, which can lead to suboptimal +compaction results. Due to the dynamic data placement, it is possible for a tenant to be placed on a shard for only a +short period of time. As a result, the data in that shard may not be compacted with other data from the same tenant. + +Cross-shard compaction is to be implemented as a future enhancement. The observed impact of the limitation is moderate. + +## Data Layout + +Profiling data from each service (identified by the `service_name` label) is stored as a separate dataset within a block. + +The block layout is composed of a collection of non-overlapping, independent datasets, each containing distinct data. +At compaction, matching datasets from different blocks are merged: their tsdb index, symbols, and profile tables are +merged and rewritten to a new block, to optimize the data for efficient reading. + +--- + +# Job Scheduler + +The scheduler implements the basic **Small Job First** strategy: blocks of lower levels are considered smaller than +blocks of higher levels, and their compaction is prioritized. This is justifiable because the smaller blocks affect +read amplification more than the larger blocks, and the compaction of smaller blocks is more efficient. + +--- + +Compaction jobs are assigned to workers in the order of their priority. + +Internally, the scheduler maintains a priority queue of jobs for each compaction level. Jobs of lower levels are +assigned first, and the scheduler does not consider jobs of higher levels until all eligible jobs of lower levels are +assigned. + +The priority is determined by several factors: +1. Compaction level. +2. Status (enum order). + - `COMPACTION_STATUS_UNSPECIFIED`: unassigned jobs. + - `COMPACTION_STATUS_IN_PROGRESS`: in-progress jobs. The first job that can't be reassigned is a sentinel: + no more jobs are eligible for assignment at this level. +3. Failures: jobs with fewer failures are prioritized. +4. Lease expiration time: the job with the earliest lease expiration time is considered first. + +See [Job Status Description](#job-status-description) for more details. + +> The challenge is that we don't know the capacity of our worker fleet in advance, and we have no control over them; +they can appear and disappear at any time. 
Another problem is that in some failure modes, such as unavailability or
+lack of compaction workers, or temporary unavailability of the metastore service, the number of blocks to be compacted
+may reach significant levels (millions).
+>
+> Therefore, we use an adaptive approach to keep the scheduler's job queue short while ensuring the compaction
+workers are fully utilized. In every request, the worker specifies how many free slots it has available for new jobs.
+As the compaction procedure is a synchronous CPU-bound task, we use the number of logical CPU cores as the worker's max
+capacity and decrement it for each in-progress compaction job. When a new request arrives, it specifies the current
+worker's capacity, which serves as evidence that the entire worker fleet has enough resources to handle at least
+this number of jobs. Thus, for every request, we try to enqueue a number of jobs equal to the reported capacity.
+>
+> Over time, this ensures a good balance between the number of jobs in the queue and the worker capacity utilization,
+even if there are millions of blocks to compact.
+
+---
+
+## Job Ownership
+
+The distributed locking implementation is inspired by [The Chubby lock service](https://static.googleusercontent.com/media/research.google.com/en//archive/chubby-osdi06.pdf)
+and [Leases: An Efficient Fault-Tolerant Mechanism
+for Distributed File Cache Consistency](https://dl.acm.org/doi/pdf/10.1145/74851.74870). The implementation is based on
+the Raft protocol.
+
+Ownership of a compaction job is granted to a compaction worker for a specified period – a *lease*:
+> A lease is a contract that gives its holder specified rights over property for a limited period of time.
+
+The real-time clock of the worker and the scheduler cannot be used; instead, the timestamp of the Raft log entry,
+assigned by the Raft leader when the entry is appended to the log, serves as the reference point in time.
+
+> The fact that leases are allocated by the current leader allows for spurious *lease invalidation* when the leader
+> changes and the clock skew exceeds the lease duration. This is acceptable because jobs will be reassigned repeatedly,
+> and the occurrence of the event should be very rare. However, the solution does not tolerate clock skews exceeding
+> the job lease duration (which is 15 seconds by default).
+
+The log entry index is used as the [fencing token](https://martin.kleppmann.com/2016/02/08/how-to-do-distributed-locking.html)
+of protected resources (compaction jobs).
+
+The Raft log entry index is a monotonically increasing integer, guaranteed to be unique for each command.
+Each time a job is assigned to a worker, the worker is provided with the current Raft log index as the fencing token,
+which is also assigned to the job. For subsequent requests, the worker must provide the fencing token it was given at
+assignment. The ownership of the job is confirmed if the provided token is greater than or equal to the job's token.
+The job's token may change if the job is reassigned to another worker, and the new token is derived from the current
+Raft log index, which is guaranteed to be greater.
+
+> Token authentication is not enforced in this design, as the system operates in a trusted environment with cooperative
+> workers. However, malicious workers can arbitrarily specify a token. In the future, we may consider implementing a
+> basic authentication mechanism based on cryptographic signatures to further ensure the integrity of token usage.
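+
+To make the comparison rule concrete, the sketch below shows the fencing-token check in isolation. It is illustrative
+only: the type and field names are simplified stand-ins and do not correspond to the actual `raft_log` messages or the
+scheduler implementation.
+
+```go
+package main
+
+import "fmt"
+
+// jobState is a simplified stand-in for the scheduler's per-job state.
+type jobState struct {
+	token          uint64 // Fencing token: the Raft log index at (re)assignment.
+	leaseExpiresAt int64  // Derived from the Raft log entry timestamp.
+}
+
+// ownedBy reports whether a worker holding workerToken still owns the job:
+// ownership is confirmed if the provided token is greater than or equal to
+// the token currently recorded for the job.
+func (j *jobState) ownedBy(workerToken uint64) bool {
+	return workerToken >= j.token
+}
+
+// reassign hands the job over using the current Raft log entry: the entry
+// index becomes the new fencing token (guaranteed to be greater than any
+// previously issued one), and the lease is renewed from the entry timestamp.
+func (j *jobState) reassign(raftIndex uint64, raftAppendedAt, leaseDuration int64) {
+	j.token = raftIndex
+	j.leaseExpiresAt = raftAppendedAt + leaseDuration
+}
+
+func main() {
+	job := jobState{token: 10, leaseExpiresAt: 100}
+	fmt.Println(job.ownedBy(10)) // true: the original assignee still owns the job.
+	job.reassign(42, 200, 15)
+	fmt.Println(job.ownedBy(10)) // false: the stale token is rejected after reassignment.
+	fmt.Println(job.ownedBy(42)) // true: the new assignee owns the job.
+}
+```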
+> +> This is an advisory locking mechanism, meaning resources are not automatically restricted from access when the lock +> is not acquired. Consequently, a client might choose to delete source blocks associated with a compaction job or +> continue processing the job even without holding the lease. This behavior, however, should be avoided in the worker +> implementation. + +## Procedures + +### Assignment + +When a worker requests a new assignment, the scheduler must find the highest-priority job that is not assigned yet, +and assign it to the worker. When a job is assigned, the worker is given a lease with a deadline. +The worker should refresh the lease before it expires. + +### Lease Refresh + +The worker must send a status update to the scheduler to refresh the lease. +The scheduler must update the lease expiration time if the worker still owns the job. + +### Reassignment + +The scheduler may revoke a job if the worker does not send the status update within the lease duration. + +When a new assignment is requested by a worker, the scheduler inspects in-progress jobs and checks if the +lease duration has expired. If the lease has expired, the job is reassigned to the worker requested for a +new assignment. + +--- + +If the timestamp of the current Raft log entry (command) exceeds the job `lease_expires_at` timestamp, +the scheduler must revoke the job: +1. Set the status to `COMPACTION_STATUS_IN_PROGRESS`. +2. Allocate a new lease with an expiration period calculated starting from the current command timestamp. +3. Set the fencing token to the current command index (guaranteed to be higher than the job fencing token). + +--- + +The worker instance that has lost the job is not notified immediately. If the worker reports an update for a job that it +is not assigned to, or if the job is not found (for example, if it has been completed by another worker), the scheduler +does not allocate a new lease; the worker should stop processing. This mechanism prevents the worker from processing +jobs unnecessarily. + +If the worker is not capable of executing the job, it may abandon the job without further notifications. The scheduler +will eventually reassign the job to another worker. The lost job might be reassigned to the same worker instance if that +instance detects the loss before others do: abandoned jobs are assigned to the first worker that requests new +assignments when no unassigned jobs are available. + +There is no explicit mechanism for reporting a failure from the worker. In fact, the scheduler must not rely on error +reports from workers, as jobs that cause workers to crash would yield no reports at all. + +To avoid infinite reassignment loops, the scheduler keeps track of reassignments (failures) for each job. If the number +of failures exceeds a set threshold, the job is not reassigned and remains at the bottom of the queue. Once the cause of +failure is resolved, the limit can be temporarily increased to reprocess these jobs. + +### Job Completion + +When the worker reports a successful completion of the job, the scheduler must remove the job from the schedule and +notify the planner about the completion. + +## Job Status Description + +The diagram below depicts the state machine of the job status. 
+ +```mermaid +stateDiagram-v2 + [*] --> Unassigned : Create Job + Unassigned --> InProgress : Assign Job + InProgress --> Success : Job Completed + InProgress --> LeaseExpired: Job Lease Expires + LeaseExpired: Abandoned Job + + LeaseExpired --> Excluded: Failure Threshold Exceeded + Excluded: Faulty Job + + Success --> [*] : Remove Job from Schedule + LeaseExpired --> InProgress : Reassign Job + + Unassigned : COMPACTION_STATUS_UNSPECIFIED + InProgress : COMPACTION_STATUS_IN_PROGRESS + Success : COMPACTION_STATUS_SUCCESS + + LeaseExpired : COMPACTION_STATUS_IN_PROGRESS + Excluded: COMPACTION_STATUS_IN_PROGRESS +``` + +### Communication + +### Scheduler to Worker + +| Status | Description | +|---------------------------------|-------------------------------------------------------------------------------------| +| `COMPACTION_STATUS_UNSPECIFIED` | Not allowed. | +| `COMPACTION_STATUS_IN_PROGRESS` | Job lease refresh. The worker should refresh the new lease before the new deadline. | +| `COMPACTION_STATUS_SUCCESS` | Not allowed. | +| --- | No lease refresh from the scheduler. The worker should stop processing. | + +### Worker to Scheduler + +| Status | Description | +|---------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------| +| `COMPACTION_STATUS_UNSPECIFIED` | Not allowed. | +| `COMPACTION_STATUS_IN_PROGRESS` | Job lease refresh. The scheduler must extend the lease of the job, if the worker still owns it. | +| `COMPACTION_STATUS_SUCCESS` | The job has been successfully completed. The scheduler must remove the job from the schedule and communicate the update to the planner. | + +### Notes + +* Job status `COMPACTION_STATUS_UNSPECIFIED` is never sent over the wire between the scheduler and workers. +* Job in `COMPACTION_STATUS_IN_PROGRESS` cannot be reassigned if its failure counter exceeds the threshold. +* Job in `COMPACTION_STATUS_SUCCESS` is removed from the schedule immediately. diff --git a/pkg/experiment/metastore/compaction/compaction.go b/pkg/experiment/metastore/compaction/compaction.go new file mode 100644 index 0000000000..3240b5bb08 --- /dev/null +++ b/pkg/experiment/metastore/compaction/compaction.go @@ -0,0 +1,59 @@ +package compaction + +import ( + "github.com/hashicorp/raft" + "go.etcd.io/bbolt" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log" +) + +type Compactor interface { + // Compact enqueues a new block for compaction + Compact(*bbolt.Tx, *raft.Log, *metastorev1.BlockMeta) error +} + +type Planner interface { + // NewPlan is used to plan new jobs. The proposed changes will then be + // submitted for Raft consensus, with the leader's jobs being accepted + // as the final decision. + // Implementation: Plan must not change the state of the Planner. + NewPlan(*bbolt.Tx, *raft.Log) Plan + // UpdatePlan communicates the status of the compaction job to the planner. + // Implementation: This method must be idempotent. + UpdatePlan(*bbolt.Tx, *raft.Log, *raft_log.CompactionPlanUpdate) error +} + +type Plan interface { + // CreateJob creates a plan for a new job. + CreateJob() (*raft_log.CompactionJobPlan, error) +} + +type Scheduler interface { + // NewSchedule is used to plan a schedule update. The proposed schedule + // will then be submitted for Raft consensus, with the leader's schedule + // being accepted as the final decision. 
+ // Implementation: Schedule must not change the state of the Scheduler. + NewSchedule(*bbolt.Tx, *raft.Log) Schedule + // UpdateSchedule adds new jobs and updates the state of existing ones. + // Implementation: This method must be idempotent. + UpdateSchedule(*bbolt.Tx, *raft.Log, *raft_log.CompactionPlanUpdate) error +} + +// Schedule prepares changes to the compaction plan based on status updates +// from compaction workers. The standard sequence assumes that job updates +// (including lease renewals and completion reports) occur first, followed by +// the assignment of new jobs to workers. Only after these updates are new +// compaction jobs planned. +type Schedule interface { + // UpdateJob is called on behalf of the worker to update the job status. + // A nil response should be interpreted as "no new lease": stop the work. + // The scheduler must validate that the worker is allowed to update the + // job by comparing the fencing token of the job. + // Refer to the documentation for details. + UpdateJob(*raft_log.CompactionJobStatusUpdate) *raft_log.CompactionJobState + // AssignJob is called on behalf of the worker to request a new job. + AssignJob() (*raft_log.AssignedCompactionJob, error) + // AddJob is called on behalf of the planner to add a new job to the schedule. + AddJob(*raft_log.CompactionJobPlan) *raft_log.CompactionJobState +} diff --git a/pkg/experiment/metastore/compaction/compactor/compaction_queue.go b/pkg/experiment/metastore/compaction/compactor/compaction_queue.go new file mode 100644 index 0000000000..701510b8f5 --- /dev/null +++ b/pkg/experiment/metastore/compaction/compactor/compaction_queue.go @@ -0,0 +1,412 @@ +package compactor + +import ( + "container/heap" + "slices" + "sync" + "sync/atomic" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/grafana/pyroscope/pkg/experiment/metastore/compaction/compactor/store" + "github.com/grafana/pyroscope/pkg/util" +) + +type compactionKey struct { + // Order of the fields is not important. + // Can be generalized. + tenant string + shard uint32 + level uint32 +} + +type compactionQueue struct { + strategy Strategy + registerer prometheus.Registerer + levels []*blockQueue +} + +// blockQueue stages blocks as they are being added. Once a batch of blocks +// within the compaction key reaches a certain size, it is pushed to the linked +// list in the arrival order and to the compaction key queue. +// +// This allows to iterate over the blocks in the order of arrival within the +// compaction dimension, while maintaining an ability to remove blocks from the +// queue efficiently. +// +// No pop operation is needed for the block queue: the only way blocks leave +// the queue is through explicit removal. Batch and block iterators provide +// the read access. +type blockQueue struct { + strategy Strategy + registerer prometheus.Registerer + staged map[compactionKey]*stagedBlocks + // Batches ordered by arrival. + head, tail *batch + // Priority queue by last update: we need to flush + // incomplete batches once they stop updating. + updates *priorityBlockQueue +} + +// stagedBlocks is a queue of blocks sharing the same compaction key. +type stagedBlocks struct { + key compactionKey + // Local queue (blocks sharing this compaction key). + head, tail *batch + // Parent block queue (global). + queue *blockQueue + // Incomplete batch of blocks. + batch *batch + // Map of block IDs to their locations in batches. 
+ refs map[string]blockRef + stats *queueStats + collector *queueStatsCollector + // Parent block queue maintains a priority queue of + // incomplete batches by the last update time. + heapIndex int + updatedAt int64 +} + +type queueStats struct { + blocks atomic.Int32 + batches atomic.Int32 + rejected atomic.Int32 + missed atomic.Int32 +} + +// blockRef points to the block in the batch. +type blockRef struct { + batch *batch + index int +} + +type blockEntry struct { + id string // Block ID. + index uint64 // Index of the command in the raft log. +} + +type batch struct { + flush sync.Once + size uint32 + blocks []blockEntry + // Reference to the parent. + staged *stagedBlocks + // Links to the global batch queue items: + // the compaction key of batches may differ. + nextG, prevG *batch + // Links to the local batch queue items: + // batches that share the same compaction key. + next, prev *batch +} + +func newCompactionQueue(strategy Strategy, registerer prometheus.Registerer) *compactionQueue { + return &compactionQueue{ + strategy: strategy, + registerer: registerer, + } +} + +func (q *compactionQueue) reset() { + for _, level := range q.levels { + if level != nil { + for _, s := range level.staged { + level.removeStaged(s) + } + } + } + clear(q.levels) + q.levels = q.levels[:0] +} + +func (q *compactionQueue) push(e store.BlockEntry) bool { + level := q.blockQueue(e.Level) + staged := level.stagedBlocks(compactionKey{ + tenant: e.Tenant, + shard: e.Shard, + level: e.Level, + }) + pushed := staged.push(blockEntry{ + id: e.ID, + index: e.Index, + }) + staged.updatedAt = e.AppendedAt + heap.Fix(level.updates, staged.heapIndex) + level.flushOldest(e.AppendedAt) + return pushed +} + +func (q *compactionQueue) blockQueue(l uint32) *blockQueue { + s := l + 1 // Levels are 0-based. + if s > uint32(len(q.levels)) { + q.levels = slices.Grow(q.levels, int(s))[:s] + } + level := q.levels[l] + if level == nil { + level = newBlockQueue(q.strategy, q.registerer) + q.levels[l] = level + } + return level +} + +func newBlockQueue(strategy Strategy, registerer prometheus.Registerer) *blockQueue { + return &blockQueue{ + strategy: strategy, + registerer: registerer, + staged: make(map[compactionKey]*stagedBlocks), + updates: new(priorityBlockQueue), + } +} + +func (q *blockQueue) stagedBlocks(k compactionKey) *stagedBlocks { + staged, ok := q.staged[k] + if !ok { + staged = &stagedBlocks{ + queue: q, + key: k, + refs: make(map[string]blockRef), + stats: new(queueStats), + } + staged.resetBatch() + q.staged[k] = staged + heap.Push(q.updates, staged) + if q.registerer != nil { + staged.collector = newQueueStatsCollector(staged) + util.RegisterOrGet(q.registerer, staged.collector) + } + } + return staged +} + +func (q *blockQueue) removeStaged(s *stagedBlocks) { + if s.collector != nil { + q.registerer.Unregister(s.collector) + } + delete(q.staged, s.key) + if s.heapIndex < 0 { + // We usually end up here since s has already been evicted + // from the priority queue via Pop due to its age. + return + } + if s.heapIndex >= q.updates.Len() { + // Should not be possible. 
+ return + } + heap.Remove(q.updates, s.heapIndex) +} + +func (s *stagedBlocks) push(block blockEntry) bool { + if _, found := s.refs[block.id]; found { + s.stats.rejected.Add(1) + return false + } + s.refs[block.id] = blockRef{batch: s.batch, index: len(s.batch.blocks)} + s.batch.blocks = append(s.batch.blocks, block) + s.batch.size++ + s.stats.blocks.Add(1) + if s.queue.strategy.flush(s.batch) && !s.flush() { + // An attempt to flush the same batch twice. + // Should not be possible. + return false + } + return true +} + +func (s *stagedBlocks) flush() (flushed bool) { + s.batch.flush.Do(func() { + s.queue.pushBatch(s.batch) + flushed = true + }) + s.resetBatch() + return flushed +} + +func (s *stagedBlocks) resetBatch() { + // TODO(kolesnikovae): get from pool. + s.batch = &batch{ + blocks: make([]blockEntry, 0, defaultBlockBatchSize), + staged: s, + } +} + +var zeroBlockEntry blockEntry + +func (s *stagedBlocks) delete(block string) blockEntry { + ref, found := s.refs[block] + if !found { + s.stats.missed.Add(1) + return zeroBlockEntry + } + // We can't change the order of the blocks in the batch, + // because that would require updating all the block locations. + e := ref.batch.blocks[ref.index] + ref.batch.blocks[ref.index] = zeroBlockEntry + ref.batch.size-- + s.stats.blocks.Add(-1) + if ref.batch.size == 0 { + s.queue.removeBatch(ref.batch) + // TODO(kolesnikovae): return to pool. + } + delete(s.refs, block) + if len(s.refs) == 0 { + s.queue.removeStaged(s) + } + return e +} + +func (q *blockQueue) pushBatch(b *batch) { + if q.tail != nil { + q.tail.nextG = b + b.prevG = q.tail + } else { + q.head = b + } + q.tail = b + + // Same for the queue of batches + // with matching compaction key. + + if b.staged.tail != nil { + b.staged.tail.next = b + b.prev = b.staged.tail + } else { + b.staged.head = b + } + b.staged.tail = b + b.staged.stats.batches.Add(1) +} + +func (q *blockQueue) removeBatch(b *batch) { + if b.prevG != nil { + b.prevG.nextG = b.nextG + } else { + // This is the head. + q.head = b.nextG + } + if b.nextG != nil { + b.nextG.prevG = b.prevG + } else { + // This is the tail. + q.tail = b.prevG + } + b.nextG = nil + b.prevG = nil + + // Same for the queue of batches + // with matching compaction key. + + if b.prev != nil { + b.prev.next = b.next + } else { + // This is the head. + b.staged.head = b.next + } + if b.next != nil { + b.next.prev = b.prev + } else { + // This is the tail. + b.staged.tail = b.next + } + b.next = nil + b.prev = nil + b.staged.stats.batches.Add(-1) +} + +func (q *blockQueue) flushOldest(now int64) { + if q.updates.Len() == 0 { + // Should not be possible. 
+ return + } + oldest := (*q.updates)[0] + if !q.strategy.flushByAge(oldest.batch, now) { + return + } + heap.Pop(q.updates) + oldest.flush() +} + +type priorityBlockQueue []*stagedBlocks + +func (pq priorityBlockQueue) Len() int { return len(pq) } + +func (pq priorityBlockQueue) Less(i, j int) bool { + return pq[i].updatedAt < pq[j].updatedAt +} + +func (pq priorityBlockQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] + pq[i].heapIndex = i + pq[j].heapIndex = j +} + +func (pq *priorityBlockQueue) Push(x interface{}) { + n := len(*pq) + staged := x.(*stagedBlocks) + staged.heapIndex = n + *pq = append(*pq, staged) +} + +func (pq *priorityBlockQueue) Pop() interface{} { + old := *pq + n := len(old) + staged := old[n-1] + old[n-1] = nil + staged.heapIndex = -1 + *pq = old[0 : n-1] + return staged +} + +func newBatchIter(q *blockQueue) *batchIter { return &batchIter{batch: q.head} } + +// batchIter iterates over the batches in the queue, in the order of arrival. +type batchIter struct{ batch *batch } + +func (i *batchIter) next() (*batch, bool) { + if i.batch == nil { + return nil, false + } + b := i.batch + i.batch = i.batch.nextG + return b, b != nil +} + +// batchIter iterates over the batches in the queue, in the order of arrival +// within the compaction key. It's guaranteed that returned blocks are unique +// across all batched. +type blockIter struct { + visited map[string]struct{} + batch *batch + i int +} + +func newBlockIter() *blockIter { + // Assuming that block IDs (16b ULID) are globally unique. + // We could achieve the same with more efficiency by marking visited + // batches. However, marking visited blocks seems to be more robust, + // and the size of the map is expected to be small. + visited := make(map[string]struct{}, 64) + visited[zeroBlockEntry.id] = struct{}{} + return &blockIter{visited: visited} +} + +func (it *blockIter) setBatch(b *batch) { + it.batch = b + it.i = 0 +} + +func (it *blockIter) next() (string, bool) { + for it.batch != nil { + if it.i >= len(it.batch.blocks) { + it.setBatch(it.batch.next) + continue + } + entry := it.batch.blocks[it.i] + if _, visited := it.visited[entry.id]; visited { + it.i++ + continue + } + it.visited[entry.id] = struct{}{} + it.i++ + return entry.id, true + } + return "", false +} diff --git a/pkg/experiment/metastore/compaction/compactor/compaction_queue_bench_test.go b/pkg/experiment/metastore/compaction/compactor/compaction_queue_bench_test.go new file mode 100644 index 0000000000..d2ebc97b16 --- /dev/null +++ b/pkg/experiment/metastore/compaction/compactor/compaction_queue_bench_test.go @@ -0,0 +1,58 @@ +package compactor + +import ( + "strconv" + "testing" + "time" + + "github.com/grafana/pyroscope/pkg/experiment/metastore/compaction/compactor/store" +) + +func BenchmarkCompactionQueue_Push(b *testing.B) { + s := Strategy{ + MaxBlocksPerLevel: []uint{20, 10, 10}, + MaxBlocksDefault: defaultBlockBatchSize, + MaxBatchAge: defaultMaxBlockBatchAge, + } + + q := newCompactionQueue(s, nil) + const ( + tenants = 1 + levels = 1 + shards = 64 + ) + + keys := make([]compactionKey, levels*tenants*shards) + for i := range keys { + keys[i] = compactionKey{ + tenant: strconv.Itoa(i % tenants), + shard: uint32(i % shards), + level: uint32(i % levels), + } + } + + writes := make([]int64, len(keys)) + now := time.Now().UnixNano() + for i := range writes { + writes[i] = now + } + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + for j, key := range keys { + q.push(store.BlockEntry{ + Index: uint64(j), + AppendedAt: 
writes[j], + ID: strconv.Itoa(j), + Tenant: key.tenant, + Shard: key.shard, + Level: key.level, + }) + } + for j := range writes { + writes[j] += int64(time.Millisecond * 500) + } + } +} diff --git a/pkg/experiment/metastore/compaction/compactor/compaction_queue_test.go b/pkg/experiment/metastore/compaction/compactor/compaction_queue_test.go new file mode 100644 index 0000000000..47ff081750 --- /dev/null +++ b/pkg/experiment/metastore/compaction/compactor/compaction_queue_test.go @@ -0,0 +1,247 @@ +package compactor + +import ( + "fmt" + "math/rand" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/grafana/pyroscope/pkg/experiment/metastore/compaction/compactor/store" +) + +func testBlockEntry(id int) blockEntry { return blockEntry{id: strconv.Itoa(id)} } + +func TestBlockQueue_Push(t *testing.T) { + q := newBlockQueue(Strategy{MaxBlocksDefault: 3}, nil) + key := compactionKey{tenant: "t", shard: 1} + + result := q.stagedBlocks(key).push(testBlockEntry(1)) + require.True(t, result) + require.Equal(t, 1, len(q.staged[key].batch.blocks)) + assert.Equal(t, testBlockEntry(1), q.staged[key].batch.blocks[0]) + + q.stagedBlocks(key).push(testBlockEntry(2)) + q.stagedBlocks(key).push(testBlockEntry(3)) // Staged blocks formed the first batch. + assert.Equal(t, 0, len(q.staged[key].batch.blocks)) + assert.Equal(t, []blockEntry{testBlockEntry(1), testBlockEntry(2), testBlockEntry(3)}, q.head.blocks) + + q.stagedBlocks(key).push(testBlockEntry(4)) + q.stagedBlocks(key).push(testBlockEntry(5)) + assert.Equal(t, 2, len(q.staged[key].batch.blocks)) + + remove(q, key, "1", "2") // Remove the first batch. + assert.Equal(t, []blockEntry{zeroBlockEntry, zeroBlockEntry, testBlockEntry(3)}, q.head.blocks) + remove(q, key, "3") + assert.Nil(t, q.head) + + q.stagedBlocks(key).push(testBlockEntry(6)) // Complete the second batch. 
+ assert.Equal(t, 0, len(q.staged[key].batch.blocks)) + + q.stagedBlocks(key).push(testBlockEntry(7)) + assert.Equal(t, []blockEntry{testBlockEntry(4), testBlockEntry(5), testBlockEntry(6)}, q.head.blocks) + assert.Equal(t, 1, len(q.staged[key].batch.blocks)) +} + +func TestBlockQueue_DuplicateBlock(t *testing.T) { + q := newBlockQueue(Strategy{MaxBlocksDefault: 3}, nil) + key := compactionKey{tenant: "t", shard: 1} + + require.True(t, q.stagedBlocks(key).push(testBlockEntry(1))) + require.False(t, q.stagedBlocks(key).push(testBlockEntry(1))) + + assert.Equal(t, 1, len(q.staged[key].batch.blocks)) +} + +func TestBlockQueue_Remove(t *testing.T) { + q := newBlockQueue(Strategy{MaxBlocksDefault: 3}, nil) + key := compactionKey{tenant: "t", shard: 1} + q.stagedBlocks(key).push(testBlockEntry(1)) + q.stagedBlocks(key).push(testBlockEntry(2)) + + remove(q, key, "1") + require.Empty(t, q.staged[key].batch.blocks[0]) + + _, exists := q.staged[key].refs["1"] + assert.False(t, exists) + + remove(q, key, "2") + require.Nil(t, q.head) + require.Nil(t, q.tail) +} + +func TestBlockQueue_RemoveNotFound(t *testing.T) { + q := newBlockQueue(Strategy{MaxBlocksDefault: 3}, nil) + key := compactionKey{tenant: "t", shard: 1} + remove(q, key, "1") + q.stagedBlocks(key).push(testBlockEntry(1)) + remove(q, key, "2") + q.stagedBlocks(key).push(testBlockEntry(2)) + q.stagedBlocks(key).push(testBlockEntry(3)) + + assert.Equal(t, []blockEntry{testBlockEntry(1), testBlockEntry(2), testBlockEntry(3)}, q.head.blocks) +} + +func TestBlockQueue_Linking(t *testing.T) { + q := newBlockQueue(Strategy{MaxBlocksDefault: 2}, nil) + key := compactionKey{tenant: "t", shard: 1} + + q.stagedBlocks(key).push(testBlockEntry(1)) + q.stagedBlocks(key).push(testBlockEntry(2)) + require.NotNil(t, q.head) + assert.Equal(t, q.head, q.tail) + + q.stagedBlocks(key).push(testBlockEntry(3)) + assert.NotNil(t, q.tail) + assert.Nil(t, q.tail.prevG) + assert.NotNil(t, q.head) + assert.Nil(t, q.head.nextG) + assert.Equal(t, []blockEntry{testBlockEntry(1), testBlockEntry(2)}, q.head.blocks) + assert.Equal(t, q.tail.blocks, q.head.blocks) + + q.stagedBlocks(key).push(testBlockEntry(4)) + assert.NotNil(t, q.tail.prevG) + assert.NotNil(t, q.head.nextG) + + q.stagedBlocks(key).push(testBlockEntry(5)) + q.stagedBlocks(key).push(testBlockEntry(6)) + assert.NotNil(t, q.tail.prevG.prevG) + assert.NotNil(t, q.head.nextG.nextG) + + remove(q, key, "3", "2") + remove(q, key, "4", "1") + remove(q, key, "6") + remove(q, key, "5") + + assert.Nil(t, q.head) + assert.Nil(t, q.tail) +} + +func TestBlockQueue_EmptyQueue(t *testing.T) { + const ( + numKeys = 50 + numBlocksPerKey = 100 + ) + + q := newBlockQueue(Strategy{MaxBlocksDefault: 3}, nil) + keys := make([]compactionKey, numKeys) + for i := 0; i < numKeys; i++ { + keys[i] = compactionKey{ + tenant: fmt.Sprint(i), + shard: uint32(i), + } + } + + blocks := make(map[compactionKey][]string) + for _, key := range keys { + for j := 0; j < numBlocksPerKey; j++ { + block := testBlockEntry(j) + require.True(t, q.stagedBlocks(key).push(block)) + blocks[key] = append(blocks[key], block.id) + } + } + + for key, s := range blocks { + rand.Shuffle(len(s), func(i, j int) { + s[i], s[j] = s[j], s[i] + }) + for _, b := range s { + staged, ok := q.staged[key] + if !ok { + return + } + assert.NotEmpty(t, staged.delete(b)) + } + } + + for key := range blocks { + require.Nil(t, q.staged[key]) + } + + assert.Nil(t, q.head) + assert.Nil(t, q.tail) +} + +func TestBlockQueue_FlushByAge(t *testing.T) { + s := Strategy{ + 
MaxBlocksDefault: 5, + MaxBatchAge: 1, + } + + c := newCompactionQueue(s, nil) + for _, e := range []store.BlockEntry{ + {Tenant: "A", Shard: 1, Level: 1, Index: 1, AppendedAt: 5, ID: "1"}, + {Tenant: "A", Shard: 1, Level: 1, Index: 2, AppendedAt: 15, ID: "2"}, + {Tenant: "A", Shard: 0, Level: 1, Index: 3, AppendedAt: 30, ID: "3"}, + } { + c.push(e) + } + + batches := make([]blockEntry, 0, 3) + q := c.blockQueue(1) + iter := newBatchIter(q) + for { + b, ok := iter.next() + if !ok { + break + } + batches = append(batches, b.blocks...) + } + + expected := []blockEntry{{"1", 1}, {"2", 2}} + // "3" remains staged as we need another push to evict it. + assert.Equal(t, expected, batches) + + staged := q.stagedBlocks(compactionKey{tenant: "A", shard: 1, level: 1}) + assert.NotEmpty(t, staged.delete("1")) + assert.NotEmpty(t, staged.delete("2")) +} + +func TestBlockQueue_BatchIterator(t *testing.T) { + q := newBlockQueue(Strategy{MaxBlocksDefault: 3}, nil) + keys := []compactionKey{ + {tenant: "t-1", shard: 1}, + {tenant: "t-2", shard: 2}, + } + + for j := 0; j < 20; j++ { + q.stagedBlocks(keys[j%len(keys)]).push(testBlockEntry(j)) + } + + iter := newBatchIter(q) + for _, expected := range []struct { + key compactionKey + blocks []string + }{ + {key: keys[0], blocks: []string{"0", "2", "4"}}, + {key: keys[1], blocks: []string{"1", "3", "5"}}, + {key: keys[0], blocks: []string{"6", "8", "10"}}, + {key: keys[1], blocks: []string{"7", "9", "11"}}, + {key: keys[0], blocks: []string{"12", "14", "16"}}, + {key: keys[1], blocks: []string{"13", "15", "17"}}, + } { + b, ok := iter.next() + require.True(t, ok) + assert.Equal(t, expected.key, b.staged.key) + actual := make([]string, len(b.blocks)) + for i := range b.blocks { + actual[i] = b.blocks[i].id + } + assert.Equal(t, expected.blocks, actual) + } + + _, ok := iter.next() + assert.False(t, ok) +} + +func remove(q *blockQueue, key compactionKey, block ...string) { + staged, ok := q.staged[key] + if !ok { + return + } + for _, b := range block { + staged.delete(b) + } +} diff --git a/pkg/experiment/metastore/compaction/compactor/compactor.go b/pkg/experiment/metastore/compaction/compactor/compactor.go new file mode 100644 index 0000000000..7e6e0eac1f --- /dev/null +++ b/pkg/experiment/metastore/compaction/compactor/compactor.go @@ -0,0 +1,140 @@ +package compactor + +import ( + "flag" + "time" + + "github.com/hashicorp/raft" + "github.com/prometheus/client_golang/prometheus" + "go.etcd.io/bbolt" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log" + "github.com/grafana/pyroscope/pkg/experiment/metastore/compaction" + "github.com/grafana/pyroscope/pkg/experiment/metastore/compaction/compactor/store" + "github.com/grafana/pyroscope/pkg/iter" +) + +var ( + _ compaction.Compactor = (*Compactor)(nil) + _ compaction.Planner = (*Compactor)(nil) +) + +type Tombstones interface { + ListTombstones(before time.Time) iter.Iterator[*metastorev1.Tombstones] +} + +type BlockQueueStore interface { + StoreEntry(*bbolt.Tx, store.BlockEntry) error + DeleteEntry(tx *bbolt.Tx, index uint64, id string) error + ListEntries(*bbolt.Tx) iter.Iterator[store.BlockEntry] + CreateBuckets(*bbolt.Tx) error +} + +type Config struct { + Strategy +} + +func (c *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + c.Strategy = DefaultStrategy() + // TODO +} + +type Compactor struct { + config Config + queue *compactionQueue + store BlockQueueStore + tombstones Tombstones +} + 
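+// NewCompactor creates a Compactor that persists incoming block entries
+// in the provided store and stages them in the in-memory compaction queue
+// used for planning new jobs.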
+func NewCompactor( + config Config, + store BlockQueueStore, + tombstones Tombstones, + reg prometheus.Registerer, +) *Compactor { + queue := newCompactionQueue(config.Strategy, reg) + return &Compactor{ + config: config, + queue: queue, + store: store, + tombstones: tombstones, + } +} + +func NewStore() *store.BlockQueueStore { + return store.NewBlockQueueStore() +} + +func (c *Compactor) Compact(tx *bbolt.Tx, cmd *raft.Log, md *metastorev1.BlockMeta) error { + if uint(md.CompactionLevel) >= c.config.MaxLevel { + return nil + } + e := store.BlockEntry{ + Index: cmd.Index, + AppendedAt: cmd.AppendedAt.UnixNano(), + ID: md.Id, + Shard: md.Shard, + Level: md.CompactionLevel, + Tenant: md.TenantId, + } + if err := c.store.StoreEntry(tx, e); err != nil { + return err + } + c.enqueue(e) + return nil +} + +func (c *Compactor) enqueue(e store.BlockEntry) bool { + return c.queue.push(e) +} + +func (c *Compactor) NewPlan(_ *bbolt.Tx, cmd *raft.Log) compaction.Plan { + before := cmd.AppendedAt.Add(-c.config.CleanupDelay) + tombstones := c.tombstones.ListTombstones(before) + return &plan{ + compactor: c, + tombstones: tombstones, + blocks: newBlockIter(), + } +} + +func (c *Compactor) UpdatePlan(tx *bbolt.Tx, _ *raft.Log, plan *raft_log.CompactionPlanUpdate) error { + for _, job := range plan.NewJobs { + // Delete source blocks from the compaction queue. + k := compactionKey{ + tenant: job.Plan.Tenant, + shard: job.Plan.Shard, + level: job.Plan.CompactionLevel, + } + staged := c.queue.blockQueue(k.level).stagedBlocks(k) + for _, b := range job.Plan.SourceBlocks { + e := staged.delete(b) + if e == zeroBlockEntry { + continue + } + if err := c.store.DeleteEntry(tx, e.index, e.id); err != nil { + return err + } + } + } + + return nil +} + +func (c *Compactor) Init(tx *bbolt.Tx) error { + return c.store.CreateBuckets(tx) +} + +func (c *Compactor) Restore(tx *bbolt.Tx) error { + // Reset in-memory state before loading entries from the store. + c.queue.reset() + entries := c.store.ListEntries(tx) + defer func() { + _ = entries.Close() + }() + for entries.Next() { + c.enqueue(entries.At()) + } + return entries.Err() +} diff --git a/pkg/experiment/metastore/compaction/compactor/compactor_strategy.go b/pkg/experiment/metastore/compaction/compactor/compactor_strategy.go new file mode 100644 index 0000000000..551b523bbf --- /dev/null +++ b/pkg/experiment/metastore/compaction/compactor/compactor_strategy.go @@ -0,0 +1,69 @@ +package compactor + +import ( + "flag" + "time" +) + +const ( + defaultBlockBatchSize = 20 + defaultMaxBlockBatchAge = int64(15 * time.Minute) +) + +// TODO: Almost everything here should be level specific. + +type Strategy struct { + MaxBlocksPerLevel []uint + MaxBatchAge int64 + MaxLevel uint + + CleanupBatchSize int32 + CleanupDelay time.Duration + + MaxBlocksDefault uint + CleanupJobMinLevel int32 + CleanupJobMaxLevel int32 +} + +func DefaultStrategy() Strategy { + return Strategy{ + MaxBlocksPerLevel: []uint{20, 10, 10}, + MaxBlocksDefault: 10, + MaxLevel: 3, + MaxBatchAge: 3 * time.Minute.Nanoseconds(), //defaultMaxBlockBatchAge, + CleanupBatchSize: 2, + CleanupDelay: 15 * time.Minute, + CleanupJobMaxLevel: 1, + } +} + +func (s *Strategy) RegisterFlags(prefix string, f *flag.FlagSet) {} + +// compact is called after the block has been added to the batch. +// If the function returns true, the batch is flushed to the global +// queue and becomes available for compaction. 
+func (s Strategy) flush(b *batch) bool { + return uint(b.size) >= s.maxBlocks(b.staged.key.level) +} + +func (s Strategy) flushByAge(b *batch, now int64) bool { + if s.MaxBatchAge > 0 && b.staged.updatedAt > 0 { + age := now - b.staged.updatedAt + return age > s.MaxBatchAge + } + return false +} + +// complete is called after the block is added to the job plan. +// If the function returns true, the job plan is considered complete +// and the job should be scheduled for execution. +func (s Strategy) complete(j *jobPlan) bool { + return uint(len(j.blocks)) >= s.maxBlocks(j.level) +} + +func (s Strategy) maxBlocks(l uint32) uint { + if l >= uint32(len(s.MaxBlocksPerLevel)) || len(s.MaxBlocksPerLevel) == 0 { + return s.MaxBlocksDefault + } + return s.MaxBlocksPerLevel[l] +} diff --git a/pkg/experiment/metastore/compaction/compactor/compactor_test.go b/pkg/experiment/metastore/compaction/compactor/compactor_test.go new file mode 100644 index 0000000000..f0e91b16b8 --- /dev/null +++ b/pkg/experiment/metastore/compaction/compactor/compactor_test.go @@ -0,0 +1,122 @@ +package compactor + +import ( + "errors" + "strconv" + "testing" + "time" + + "github.com/hashicorp/raft" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log" + "github.com/grafana/pyroscope/pkg/experiment/metastore/compaction/compactor/store" + "github.com/grafana/pyroscope/pkg/iter" + "github.com/grafana/pyroscope/pkg/test" + "github.com/grafana/pyroscope/pkg/test/mocks/mockcompactor" +) + +func TestCompactor_Compact(t *testing.T) { + queueStore := new(mockcompactor.MockBlockQueueStore) + tombstones := new(mockcompactor.MockTombstones) + + md := &metastorev1.BlockMeta{TenantId: "A", Shard: 0, CompactionLevel: 0, Id: "1"} + cmd := &raft.Log{Index: uint64(1), AppendedAt: time.Unix(0, 0)} + compactor := NewCompactor(testConfig, queueStore, tombstones, nil) + + testErr := errors.New("x") + t.Run("fails if cannot store the entry", test.AssertIdempotentSubtest(t, func(t *testing.T) { + queueStore.On("StoreEntry", mock.Anything, mock.Anything).Return(testErr) + require.ErrorIs(t, compactor.Compact(nil, cmd, md), testErr) + })) + + queueStore.AssertExpectations(t) + tombstones.AssertExpectations(t) +} + +func TestCompactor_UpdatePlan(t *testing.T) { + const N = 10 + + tombstones := new(mockcompactor.MockTombstones) + queueStore := new(mockcompactor.MockBlockQueueStore) + queueStore.On("StoreEntry", mock.Anything, mock.Anything). + Return(nil).Times(N) + + compactor := NewCompactor(testConfig, queueStore, tombstones, nil) + now := time.Unix(0, 0) + for i := 0; i < N; i++ { + cmd := &raft.Log{Index: uint64(1), AppendedAt: now} + md := &metastorev1.BlockMeta{TenantId: "A", Shard: 0, CompactionLevel: 0, Id: strconv.Itoa(i)} + err := compactor.Compact(nil, cmd, md) + require.NoError(t, err) + } + + planned := make([]*raft_log.CompactionJobPlan, 3) + test.AssertIdempotent(t, func(t *testing.T) { + tombstones.On("ListTombstones", mock.Anything). 
+ Return(iter.NewEmptyIterator[*metastorev1.Tombstones](), nil) + + planner := compactor.NewPlan(nil, &raft.Log{Index: uint64(2), AppendedAt: now}) + for i := range planned { + job, err := planner.CreateJob() + require.NoError(t, err) + require.NotNil(t, job) + planned[i] = job + } + + job, err := planner.CreateJob() + require.NoError(t, err) + require.Nil(t, job) + }) + + // UpdatePlan is mostly idempotent, except it won't + // DeleteEntry that is not loaded into memory. + queueStore.On("DeleteEntry", mock.Anything, mock.Anything, mock.Anything). + Return(nil).Times(9) + + test.AssertIdempotent(t, func(t *testing.T) { + newJobs := make([]*raft_log.NewCompactionJob, 3) + for i := range planned { + newJobs[i] = &raft_log.NewCompactionJob{Plan: planned[i]} + } + + update := &raft_log.CompactionPlanUpdate{NewJobs: newJobs} + cmd := &raft.Log{Index: uint64(2), AppendedAt: now} + require.NoError(t, compactor.UpdatePlan(nil, cmd, update)) + + planner := compactor.NewPlan(nil, &raft.Log{Index: uint64(3), AppendedAt: now}) + job, err := planner.CreateJob() + require.NoError(t, err) + require.Nil(t, job) + }) + + queueStore.AssertExpectations(t) + tombstones.AssertExpectations(t) +} + +func TestCompactor_Restore(t *testing.T) { + queueStore := new(mockcompactor.MockBlockQueueStore) + queueStore.On("ListEntries", mock.Anything).Return(iter.NewSliceIterator([]store.BlockEntry{ + {Index: 0, ID: "0", Tenant: "A"}, + {Index: 1, ID: "1", Tenant: "A"}, + {Index: 2, ID: "2", Tenant: "A"}, + {Index: 3, ID: "3", Tenant: "A"}, + })) + + tombstones := new(mockcompactor.MockTombstones) + tombstones.On("ListTombstones", mock.Anything). + Return(iter.NewEmptyIterator[*metastorev1.Tombstones](), nil) + + compactor := NewCompactor(testConfig, queueStore, tombstones, nil) + require.NoError(t, compactor.Restore(nil)) + + planner := compactor.NewPlan(nil, new(raft.Log)) + planned, err := planner.CreateJob() + require.NoError(t, err) + require.NotEmpty(t, planned) + + queueStore.AssertExpectations(t) + tombstones.AssertExpectations(t) +} diff --git a/pkg/experiment/metastore/compaction/compactor/metrics.go b/pkg/experiment/metastore/compaction/compactor/metrics.go new file mode 100644 index 0000000000..25f21cf0a4 --- /dev/null +++ b/pkg/experiment/metastore/compaction/compactor/metrics.go @@ -0,0 +1,68 @@ +package compactor + +import ( + "strconv" + + "github.com/prometheus/client_golang/prometheus" +) + +type queueStatsCollector struct { + stats *queueStats + + blocks *prometheus.Desc + batches *prometheus.Desc + rejected *prometheus.Desc + missed *prometheus.Desc +} + +const blockQueueMetricsPrefix = "compaction_block_queue_" + +func newQueueStatsCollector(staged *stagedBlocks) *queueStatsCollector { + constLabels := map[string]string{ + "tenant": staged.key.tenant, + "shard": strconv.FormatUint(uint64(staged.key.shard), 10), + "level": strconv.FormatUint(uint64(staged.key.level), 10), + } + + return &queueStatsCollector{ + stats: staged.stats, + + blocks: prometheus.NewDesc( + blockQueueMetricsPrefix+"blocks", + "The total number of blocks in the queue.", + nil, constLabels, + ), + + batches: prometheus.NewDesc( + blockQueueMetricsPrefix+"batches", + "The total number of block batches in the queue.", + nil, constLabels, + ), + + rejected: prometheus.NewDesc( + blockQueueMetricsPrefix+"push_rejected_total", + "The total number of blocks rejected on push.", + nil, constLabels, + ), + + missed: prometheus.NewDesc( + blockQueueMetricsPrefix+"delete_missed_total", + "The total number of blocks missed on delete.", + nil, 
constLabels, + ), + } +} + +func (b *queueStatsCollector) Describe(c chan<- *prometheus.Desc) { + c <- b.blocks + c <- b.batches + c <- b.rejected + c <- b.missed +} + +func (b *queueStatsCollector) Collect(m chan<- prometheus.Metric) { + m <- prometheus.MustNewConstMetric(b.blocks, prometheus.GaugeValue, float64(b.stats.blocks.Load())) + m <- prometheus.MustNewConstMetric(b.batches, prometheus.GaugeValue, float64(b.stats.batches.Load())) + m <- prometheus.MustNewConstMetric(b.rejected, prometheus.CounterValue, float64(b.stats.rejected.Load())) + m <- prometheus.MustNewConstMetric(b.missed, prometheus.CounterValue, float64(b.stats.missed.Load())) +} diff --git a/pkg/experiment/metastore/compaction/compactor/metrics_test.go b/pkg/experiment/metastore/compaction/compactor/metrics_test.go new file mode 100644 index 0000000000..cfcb30d482 --- /dev/null +++ b/pkg/experiment/metastore/compaction/compactor/metrics_test.go @@ -0,0 +1,30 @@ +package compactor + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/grafana/pyroscope/pkg/experiment/metastore/compaction/compactor/store" +) + +func TestCollectorRegistration(t *testing.T) { + reg := prometheus.NewRegistry() + for i := 0; i < 2; i++ { + entries := []store.BlockEntry{ + {Tenant: "A", Shard: 0, Level: 0}, + {Tenant: "A", Shard: 0, Level: 1}, + {Tenant: "A", Shard: 0, Level: 1}, + {Tenant: "A", Shard: 1, Level: 0}, + {Tenant: "B", Shard: 0, Level: 0}, + } + c := NewCompactor(testConfig, nil, nil, reg) + for _, e := range entries { + c.enqueue(e) + } + c.queue.reset() + for _, e := range entries { + c.enqueue(e) + } + } +} diff --git a/pkg/experiment/metastore/compaction/compactor/plan.go b/pkg/experiment/metastore/compaction/compactor/plan.go new file mode 100644 index 0000000000..021e6f104f --- /dev/null +++ b/pkg/experiment/metastore/compaction/compactor/plan.go @@ -0,0 +1,143 @@ +package compactor + +import ( + "fmt" + "slices" + "strconv" + "strings" + + "github.com/cespare/xxhash/v2" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log" + "github.com/grafana/pyroscope/pkg/iter" +) + +// plan should be used to prepare the compaction plan update. +// The implementation must have no side effects or alter the +// Compactor in any way. +type plan struct { + level uint32 + // Read-only. + tombstones iter.Iterator[*metastorev1.Tombstones] + compactor *Compactor + batches *batchIter + blocks *blockIter +} + +func (p *plan) CreateJob() (*raft_log.CompactionJobPlan, error) { + planned := p.nextJob() + if planned == nil { + return nil, nil + } + job := raft_log.CompactionJobPlan{ + Name: planned.name, + Shard: planned.shard, + Tenant: planned.tenant, + CompactionLevel: planned.level, + SourceBlocks: planned.blocks, + Tombstones: planned.tombstones, + } + return &job, nil +} + +type jobPlan struct { + compactionKey + name string + tombstones []*metastorev1.Tombstones + blocks []string +} + +// Plan compaction of the queued blocks. The algorithm is simple: +// - Iterate block queues from low levels to higher ones. +// - Find the oldest batch in the order of arrival and try to compact it. +// - A batch may not translate into a job (e.g., if some blocks have been +// removed). Therefore, we navigate to the next batch with the same +// compaction key in this case. 
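+// For example, with maxBlocks(0) == 3 and interleaved arrivals for two
+// compaction keys at level 0, the key whose batch reaches three blocks first
+// is planned first; see plan_test.go for concrete cases.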
+func (p *plan) nextJob() *jobPlan { + var job jobPlan + for p.level < uint32(len(p.compactor.queue.levels)) { + if p.batches == nil { + level := p.compactor.queue.levels[p.level] + if level == nil { + p.level++ + continue + } + p.batches = newBatchIter(level) + } + + b, ok := p.batches.next() + if !ok { + // We've done with the current level: no more batches + // in the in-order queue. Move to the next level. + p.batches = nil + p.level++ + continue + } + + // We've found the oldest batch, it's time to plan a job. + // Job levels are zero based: L0 job means that it includes blocks + // with compaction level 0. This can be altered (1-based levels): + // job.level++ + job.compactionKey = b.staged.key + job.blocks = slices.Grow(job.blocks, defaultBlockBatchSize)[:0] + p.blocks.setBatch(b) + + // Once we finish with the current batch blocks, the iterator moves + // to the next batch–with-the-same-compaction-key, which is not + // necessarily the next in-order-batch from the batch iterator. + for { + block, ok := p.blocks.next() + if !ok { + // No more blocks with this compaction key at the level. + // The current job plan is to be cancelled, and we move + // on to the next in-order batch. + break + } + + job.blocks = append(job.blocks, block) + if p.compactor.config.complete(&job) { + nameJob(&job) + p.getTombstones(&job) + return &job + } + } + } + + return nil +} + +// Job name is a variable length string that should be globally unique +// and is used as a tiebreaker in the compaction job queue ordering. +func nameJob(plan *jobPlan) { + // Should be on stack; 16b per block; expected ~20 blocks. + buf := make([]byte, 0, 512) + for _, b := range plan.blocks { + buf = append(buf, b...) + } + var name strings.Builder + name.WriteString(fmt.Sprintf("%x", xxhash.Sum64(buf))) + name.WriteByte('-') + name.WriteByte('T') + name.WriteString(plan.tenant) + name.WriteByte('-') + name.WriteByte('S') + name.WriteString(strconv.FormatUint(uint64(plan.shard), 10)) + name.WriteByte('-') + name.WriteByte('L') + name.WriteString(strconv.FormatUint(uint64(plan.level), 10)) + plan.name = name.String() +} + +func (p *plan) getTombstones(job *jobPlan) { + if int32(p.level) > p.compactor.config.CleanupJobMaxLevel { + return + } + if int32(p.level) < p.compactor.config.CleanupJobMinLevel { + return + } + s := int(p.compactor.config.CleanupBatchSize) + for i := 0; i < s && p.tombstones.Next(); i++ { + job.tombstones = append(job.tombstones, p.tombstones.At()) + } +} diff --git a/pkg/experiment/metastore/compaction/compactor/plan_test.go b/pkg/experiment/metastore/compaction/compactor/plan_test.go new file mode 100644 index 0000000000..6af56c563c --- /dev/null +++ b/pkg/experiment/metastore/compaction/compactor/plan_test.go @@ -0,0 +1,266 @@ +package compactor + +import ( + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/grafana/pyroscope/pkg/experiment/metastore/compaction/compactor/store" +) + +var testConfig = Config{ + Strategy: Strategy{ + MaxBlocksPerLevel: []uint{3, 2, 2}, + MaxBlocksDefault: 2, + MaxBatchAge: 0, + MaxLevel: 3, + }, +} + +func TestPlan_same_level(t *testing.T) { + c := NewCompactor(testConfig, nil, nil, nil) + + var i int // The index is used outside the loop. 
+ for _, e := range []store.BlockEntry{ + {Tenant: "A", Shard: 0, Level: 0}, + {Tenant: "B", Shard: 2, Level: 0}, + {Tenant: "A", Shard: 1, Level: 0}, + {Tenant: "A", Shard: 1, Level: 0}, + {Tenant: "B", Shard: 2, Level: 0}, + {Tenant: "A", Shard: 1, Level: 0}, // TA-S1-L0 is ready + {Tenant: "B", Shard: 2, Level: 0}, // TB-S2-L0 + {Tenant: "A", Shard: 0, Level: 0}, + {Tenant: "A", Shard: 1, Level: 0}, + {Tenant: "A", Shard: 0, Level: 0}, // TA-S0-L0 + {Tenant: "B", Shard: 2, Level: 0}, + {Tenant: "A", Shard: 1, Level: 0}, + } { + e.Index = uint64(i) + e.ID = strconv.Itoa(i) + c.enqueue(e) + i++ + } + + expected := []*jobPlan{ + { + compactionKey: compactionKey{tenant: "A", shard: 1, level: 0}, + name: "ffba6b12acb007e6-TA-S1-L0", + blocks: []string{"2", "3", "5"}, + }, + { + compactionKey: compactionKey{tenant: "B", shard: 2, level: 0}, + name: "3860b3ec2cf5bfa3-TB-S2-L0", + blocks: []string{"1", "4", "6"}, + }, + { + compactionKey: compactionKey{tenant: "A", shard: 0, level: 0}, + name: "6a1fee35d1568267-TA-S0-L0", + blocks: []string{"0", "7", "9"}, + }, + } + + p := &plan{compactor: c, blocks: newBlockIter()} + planned := make([]*jobPlan, 0, len(expected)) + for j := p.nextJob(); j != nil; j = p.nextJob() { + planned = append(planned, j) + } + assert.Equal(t, expected, planned) + + // Now we're adding some more blocks to produce more jobs, + // using the same queue. We expect all the previously planned + // jobs and new ones. + expected = append(expected, []*jobPlan{ + { + compactionKey: compactionKey{tenant: "A", shard: 1, level: 0}, + name: "34d4246acbf55d05-TA-S1-L0", + blocks: []string{"8", "11", "13"}, + }, + { + compactionKey: compactionKey{tenant: "B", shard: 2, level: 0}, + name: "5567ff0cdb349aaf-TB-S2-L0", + blocks: []string{"10", "12", "14"}, + }, + }...) + + for _, e := range []store.BlockEntry{ + {Tenant: "B", Shard: 2, Level: 0}, + {Tenant: "A", Shard: 1, Level: 0}, // TA-S1-L0 is ready + {Tenant: "B", Shard: 2, Level: 0}, // TB-S2-L0 + } { + e.Index = uint64(i) + e.ID = strconv.Itoa(i) + c.enqueue(e) + i++ + } + + p = &plan{compactor: c, blocks: newBlockIter()} + planned = planned[:0] // Old jobs should be re-planned. + for j := p.nextJob(); j != nil; j = p.nextJob() { + planned = append(planned, j) + } + assert.Equal(t, expected, planned) +} + +func TestPlan_level_priority(t *testing.T) { + c := NewCompactor(testConfig, nil, nil, nil) + + // Lower level job should be planned first despite the arrival order. 
+ var i int + for _, e := range []store.BlockEntry{ + {Tenant: "B", Shard: 2, Level: 1}, + {Tenant: "A", Shard: 1, Level: 0}, + {Tenant: "A", Shard: 1, Level: 0}, + {Tenant: "B", Shard: 2, Level: 1}, // TB-S2-L1 is ready + {Tenant: "A", Shard: 1, Level: 0}, // TA-S1-L0 + } { + e.Index = uint64(i) + e.ID = strconv.Itoa(i) + c.enqueue(e) + i++ + } + + expected := []*jobPlan{ + { + compactionKey: compactionKey{tenant: "A", shard: 1, level: 0}, + name: "3567f9a8f34203a9-TA-S1-L0", + blocks: []string{"1", "2", "4"}, + }, + { + compactionKey: compactionKey{tenant: "B", shard: 2, level: 1}, + name: "3254788b90b8fafc-TB-S2-L1", + blocks: []string{"0", "3"}, + }, + } + + p := &plan{compactor: c, blocks: newBlockIter()} + planned := make([]*jobPlan, 0, len(expected)) + for j := p.nextJob(); j != nil; j = p.nextJob() { + planned = append(planned, j) + } + + assert.Equal(t, expected, planned) +} + +func TestPlan_empty_queue(t *testing.T) { + c := NewCompactor(testConfig, nil, nil, nil) + + p := &plan{compactor: c, blocks: newBlockIter()} + assert.Nil(t, p.nextJob()) + + c.enqueue(store.BlockEntry{ + Index: 0, + ID: "0", + Tenant: "A", + Shard: 1, + Level: 1, + }) + + // L0 queue is empty. + // L1 queue has one block. + p = &plan{compactor: c, blocks: newBlockIter()} + assert.Nil(t, p.nextJob()) + + c.enqueue(store.BlockEntry{ + Index: 1, + ID: "1", + Tenant: "A", + Shard: 1, + Level: 1, + }) + + // L0 queue is empty. + // L2 has blocks for a job. + p = &plan{compactor: c, blocks: newBlockIter()} + assert.NotNil(t, p.nextJob()) +} + +func TestPlan_deleted_blocks(t *testing.T) { + c := NewCompactor(testConfig, nil, nil, nil) + + var i int // The index is used outside the loop. + for _, e := range []store.BlockEntry{ + {Tenant: "A", Shard: 1, Level: 0}, + {Tenant: "B", Shard: 2, Level: 0}, + {Tenant: "A", Shard: 1, Level: 0}, + {Tenant: "B", Shard: 2, Level: 0}, + {Tenant: "A", Shard: 1, Level: 0}, // TA-S1-L0 is ready + {Tenant: "B", Shard: 2, Level: 0}, // TB-S2-L0 + } { + e.Index = uint64(i) + e.ID = strconv.Itoa(i) + c.enqueue(e) + i++ + } + + // Invalidate TA-S1-L0 plan by removing some blocks. + remove(c.queue.levels[0], compactionKey{ + tenant: "A", + shard: 1, + level: 0, + }, "0", "4") + + // "0" - - - + // "1" {Tenant: "B", Shard: 2, Level: 0}, + // "2" {Tenant: "A", Shard: 1, Level: 0}, + // "3" {Tenant: "B", Shard: 2, Level: 0}, + // "4" - - - // TA-S1-L0 would be created here. + // "5" {Tenant: "B", Shard: 2, Level: 0}, // TB-S2-L0 is ready + expected := []*jobPlan{ + { + compactionKey: compactionKey{tenant: "B", shard: 2, level: 0}, + name: "5668d093d5b7cc2f-TB-S2-L0", + blocks: []string{"1", "3", "5"}, + }, + } + + p := &plan{compactor: c, blocks: newBlockIter()} + planned := make([]*jobPlan, 0, len(expected)) + for j := p.nextJob(); j != nil; j = p.nextJob() { + planned = append(planned, j) + } + assert.Equal(t, expected, planned) + + // Now we add some more blocks to make sure that the + // invalidated queue can still be compacted. + for _, e := range []store.BlockEntry{ + {Tenant: "A", Shard: 1, Level: 0}, + {Tenant: "A", Shard: 1, Level: 0}, + {Tenant: "A", Shard: 1, Level: 0}, + } { + e.Index = uint64(i) + e.ID = strconv.Itoa(i) + c.enqueue(e) + i++ + } + + expected = append([]*jobPlan{ + { + compactionKey: compactionKey{tenant: "A", shard: 1, level: 0}, + name: "69cebc117138be9-TA-S1-L0", + blocks: []string{"2", "6", "7"}, + }, + }, expected...) 
+ + p = &plan{compactor: c, blocks: newBlockIter()} + planned = planned[:0] + for j := p.nextJob(); j != nil; j = p.nextJob() { + planned = append(planned, j) + } + assert.Equal(t, expected, planned) +} + +func TestPlan_deleted_batch(t *testing.T) { + c := NewCompactor(testConfig, nil, nil, nil) + + for i, e := range []store.BlockEntry{{}, {}, {}} { + e.Index = uint64(i) + e.ID = strconv.Itoa(i) + c.enqueue(e) + } + + remove(c.queue.levels[0], compactionKey{}, "0", "1", "2") + + p := &plan{compactor: c, blocks: newBlockIter()} + assert.Nil(t, p.nextJob()) +} diff --git a/pkg/experiment/metastore/compaction/compactor/store/block_queue_store.go b/pkg/experiment/metastore/compaction/compactor/store/block_queue_store.go new file mode 100644 index 0000000000..d878a9e7df --- /dev/null +++ b/pkg/experiment/metastore/compaction/compactor/store/block_queue_store.go @@ -0,0 +1,121 @@ +package store + +import ( + "encoding/binary" + "errors" + + "go.etcd.io/bbolt" + + "github.com/grafana/pyroscope/pkg/experiment/metastore/store" + "github.com/grafana/pyroscope/pkg/iter" +) + +var ErrInvalidBlockEntry = errors.New("invalid block entry") + +var blockQueueBucketName = []byte("compaction_block_queue") + +// BlockEntry represents a block metadata entry in the compaction block queue. +type BlockEntry struct { + // Key. Ensures uniqueness and order. + Index uint64 + ID string + // Value. Needed to place the entry in the right queue. + AppendedAt int64 + Level uint32 + Shard uint32 + Tenant string +} + +// BlockQueueStore provides methods to store and retrieve block queues. +// The store is optimized for two cases: load the entire queue (preserving +// the original order) and remove an entry from the queue. +// +// Compactor maintains an in-memory queue of blocks to compact, therefore +// the store never reads individual entries. 
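+//
+// On disk, each entry is keyed by the 8-byte big-endian raft index followed
+// by the block ID, so iteration preserves insertion order; the value packs
+// AppendedAt, Level, Shard, and Tenant (see marshalBlockEntry below).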
+// +// NOTE(kolesnikovae): We can leverage the fact that removed entries are +// always ordered in ascending order by index and use the same cursor when +// removing entries from the database: +// DeleteEntry(*bbolt.Tx, ...store.BlockEntry) error +type BlockQueueStore struct{ bucketName []byte } + +func NewBlockQueueStore() *BlockQueueStore { + return &BlockQueueStore{bucketName: blockQueueBucketName} +} + +func (s BlockQueueStore) CreateBuckets(tx *bbolt.Tx) error { + _, err := tx.CreateBucketIfNotExists(s.bucketName) + return err +} + +func (s BlockQueueStore) StoreEntry(tx *bbolt.Tx, entry BlockEntry) error { + e := marshalBlockEntry(entry) + return tx.Bucket(s.bucketName).Put(e.Key, e.Value) +} + +func (s BlockQueueStore) DeleteEntry(tx *bbolt.Tx, index uint64, id string) error { + return tx.Bucket(s.bucketName).Delete(marshalBlockEntryKey(index, id)) +} + +func (s BlockQueueStore) ListEntries(tx *bbolt.Tx) iter.Iterator[BlockEntry] { + return newBlockEntriesIterator(tx.Bucket(s.bucketName)) +} + +type blockEntriesIterator struct { + iter *store.CursorIterator + cur BlockEntry + err error +} + +func newBlockEntriesIterator(bucket *bbolt.Bucket) *blockEntriesIterator { + return &blockEntriesIterator{iter: store.NewCursorIter(nil, bucket.Cursor())} +} + +func (x *blockEntriesIterator) Next() bool { + if x.err != nil || !x.iter.Next() { + return false + } + x.err = unmarshalBlockEntry(&x.cur, x.iter.At()) + return x.err == nil +} + +func (x *blockEntriesIterator) At() BlockEntry { return x.cur } + +func (x *blockEntriesIterator) Close() error { return x.iter.Close() } + +func (x *blockEntriesIterator) Err() error { + if err := x.iter.Err(); err != nil { + return err + } + return x.err +} + +func marshalBlockEntry(e BlockEntry) store.KV { + k := marshalBlockEntryKey(e.Index, e.ID) + b := make([]byte, 8+4+4+len(e.Tenant)) + binary.BigEndian.PutUint64(b[0:8], uint64(e.AppendedAt)) + binary.BigEndian.PutUint32(b[8:12], e.Level) + binary.BigEndian.PutUint32(b[12:16], e.Shard) + copy(b[16:], e.Tenant) + return store.KV{Key: k, Value: b} +} + +func marshalBlockEntryKey(index uint64, id string) []byte { + b := make([]byte, 8+len(id)) + binary.BigEndian.PutUint64(b, index) + copy(b[8:], id) + return b +} + +func unmarshalBlockEntry(dst *BlockEntry, e store.KV) error { + if len(e.Key) < 8 || len(e.Value) < 16 { + return ErrInvalidBlockEntry + } + dst.Index = binary.BigEndian.Uint64(e.Key) + dst.ID = string(e.Key[8:]) + dst.AppendedAt = int64(binary.BigEndian.Uint64(e.Value[0:8])) + dst.Level = binary.BigEndian.Uint32(e.Value[8:12]) + dst.Shard = binary.BigEndian.Uint32(e.Value[12:16]) + dst.Tenant = string(e.Value[16:]) + return nil +} diff --git a/pkg/experiment/metastore/compaction/compactor/store/block_queue_store_test.go b/pkg/experiment/metastore/compaction/compactor/store/block_queue_store_test.go new file mode 100644 index 0000000000..b8e33d7e8b --- /dev/null +++ b/pkg/experiment/metastore/compaction/compactor/store/block_queue_store_test.go @@ -0,0 +1,103 @@ +package store + +import ( + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/grafana/pyroscope/pkg/test" +) + +func TestBlockQueueStore_StoreEntry(t *testing.T) { + db := test.BoltDB(t) + + s := NewBlockQueueStore() + tx, err := db.Begin(true) + require.NoError(t, err) + require.NoError(t, s.CreateBuckets(tx)) + + entries := make([]BlockEntry, 1000) + for i := range entries { + entries[i] = BlockEntry{ + Index: uint64(i), + ID: strconv.Itoa(i), + 
AppendedAt: time.Now().UnixNano(), + Level: uint32(i % 3), + Shard: uint32(i % 8), + Tenant: strconv.Itoa(i % 4), + } + } + for i := range entries { + assert.NoError(t, s.StoreEntry(tx, entries[i])) + } + require.NoError(t, tx.Commit()) + + s = NewBlockQueueStore() + tx, err = db.Begin(false) + require.NoError(t, err) + iter := s.ListEntries(tx) + var i int + for iter.Next() { + assert.Less(t, i, len(entries)) + assert.Equal(t, entries[i], iter.At()) + i++ + } + assert.Nil(t, iter.Err()) + assert.Nil(t, iter.Close()) + require.NoError(t, tx.Rollback()) +} + +func TestBlockQueueStore_DeleteEntry(t *testing.T) { + db := test.BoltDB(t) + + s := NewBlockQueueStore() + tx, err := db.Begin(true) + require.NoError(t, err) + require.NoError(t, s.CreateBuckets(tx)) + + entries := make([]BlockEntry, 1000) + for i := range entries { + entries[i] = BlockEntry{ + Index: uint64(i), + ID: strconv.Itoa(i), + AppendedAt: time.Now().UnixNano(), + Level: uint32(i % 3), + Shard: uint32(i % 8), + Tenant: strconv.Itoa(i % 4), + } + } + for i := range entries { + assert.NoError(t, s.StoreEntry(tx, entries[i])) + } + require.NoError(t, tx.Commit()) + + // Delete random 25%. + tx, err = db.Begin(true) + require.NoError(t, err) + for i := 0; i < len(entries); i += 4 { + assert.NoError(t, s.DeleteEntry(tx, entries[i].Index, entries[i].ID)) + } + require.NoError(t, tx.Commit()) + + // Check remaining entries. + s = NewBlockQueueStore() + tx, err = db.Begin(false) + require.NoError(t, err) + iter := s.ListEntries(tx) + var i int + for iter.Next() { + if i%4 == 0 { + // Skip deleted entries. + i++ + } + assert.Less(t, i, len(entries)) + assert.Equal(t, entries[i], iter.At()) + i++ + } + assert.Nil(t, iter.Err()) + assert.Nil(t, iter.Close()) + require.NoError(t, tx.Rollback()) +} diff --git a/pkg/experiment/metastore/compaction/scheduler/metrics.go b/pkg/experiment/metastore/compaction/scheduler/metrics.go new file mode 100644 index 0000000000..a8de0458bb --- /dev/null +++ b/pkg/experiment/metastore/compaction/scheduler/metrics.go @@ -0,0 +1,119 @@ +package scheduler + +import ( + "strconv" + + "github.com/prometheus/client_golang/prometheus" +) + +type statsCollector struct { + s *Scheduler + + addedTotal *prometheus.Desc + completedTotal *prometheus.Desc + assignedTotal *prometheus.Desc + reassignedTotal *prometheus.Desc + + // Gauge showing the job queue status breakdown. 
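+ // Reported with a "status" label: assigned, unassigned, reassigned,
+ // or failed (see collectMetrics).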
+ jobs *prometheus.Desc +} + +const schedulerQueueMetricsPrefix = "compaction_scheduler_queue_" + +func newStatsCollector(s *Scheduler) *statsCollector { + variableLabels := []string{"level"} + statusGaugeLabels := append(variableLabels, "status") + return &statsCollector{ + s: s, + + jobs: prometheus.NewDesc( + schedulerQueueMetricsPrefix+"jobs", + "The total number of jobs in the queue.", + statusGaugeLabels, nil, + ), + + addedTotal: prometheus.NewDesc( + schedulerQueueMetricsPrefix+"added_jobs_total", + "The total number of jobs added to the queue.", + variableLabels, nil, + ), + completedTotal: prometheus.NewDesc( + schedulerQueueMetricsPrefix+"completed_jobs_total", + "The total number of jobs completed.", + variableLabels, nil, + ), + assignedTotal: prometheus.NewDesc( + schedulerQueueMetricsPrefix+"assigned_jobs_total", + "The total number of jobs assigned.", + variableLabels, nil, + ), + reassignedTotal: prometheus.NewDesc( + schedulerQueueMetricsPrefix+"reassigned_jobs_total", + "The total number of jobs reassigned.", + variableLabels, nil, + ), + } +} + +func (c *statsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- c.jobs + ch <- c.addedTotal + ch <- c.completedTotal + ch <- c.assignedTotal + ch <- c.reassignedTotal +} + +func (c *statsCollector) Collect(ch chan<- prometheus.Metric) { + for _, m := range c.collectMetrics() { + ch <- m + } +} + +func (c *statsCollector) collectMetrics() []prometheus.Metric { + c.s.mu.Lock() + defer c.s.mu.Unlock() + + metrics := make([]prometheus.Metric, 0, 8*len(c.s.queue.levels)) + for i, q := range c.s.queue.levels { + var stats queueStats + for _, e := range *q.jobs { + switch { + case e.Status == 0: + stats.unassigned++ + case c.s.config.MaxFailures > 0 && uint64(e.Failures) >= c.s.config.MaxFailures: + stats.failed++ + case e.Failures > 0: + stats.reassigned++ + default: + stats.assigned++ + } + } + + // Update stored gauges. Those are not used at the moment, + // but can help planning schedule updates in the future. + q.stats.assigned = stats.assigned + q.stats.unassigned = stats.unassigned + q.stats.reassigned = stats.reassigned + q.stats.failed = stats.failed + + // Counters are updated on access. 
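+ // These cumulative totals are maintained by the job queue (add/update/delete)
+ // and are only copied here, under the scheduler mutex held above.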
+ stats.addedTotal = q.stats.addedTotal + stats.completedTotal = q.stats.completedTotal + stats.assignedTotal = q.stats.assignedTotal + stats.reassignedTotal = q.stats.reassignedTotal + + level := strconv.Itoa(i) + metrics = append(metrics, + prometheus.MustNewConstMetric(c.jobs, prometheus.GaugeValue, float64(stats.assigned), level, "assigned"), + prometheus.MustNewConstMetric(c.jobs, prometheus.GaugeValue, float64(stats.unassigned), level, "unassigned"), + prometheus.MustNewConstMetric(c.jobs, prometheus.GaugeValue, float64(stats.reassigned), level, "reassigned"), + prometheus.MustNewConstMetric(c.jobs, prometheus.GaugeValue, float64(stats.failed), level, "failed"), + prometheus.MustNewConstMetric(c.addedTotal, prometheus.CounterValue, float64(stats.addedTotal), level), + prometheus.MustNewConstMetric(c.completedTotal, prometheus.CounterValue, float64(stats.completedTotal), level), + prometheus.MustNewConstMetric(c.assignedTotal, prometheus.CounterValue, float64(stats.assignedTotal), level), + prometheus.MustNewConstMetric(c.reassignedTotal, prometheus.CounterValue, float64(stats.reassignedTotal), level), + ) + } + + return metrics +} diff --git a/pkg/experiment/metastore/compaction/scheduler/metrics_test.go b/pkg/experiment/metastore/compaction/scheduler/metrics_test.go new file mode 100644 index 0000000000..10f1710f0d --- /dev/null +++ b/pkg/experiment/metastore/compaction/scheduler/metrics_test.go @@ -0,0 +1,29 @@ +package scheduler + +import ( + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log" +) + +func TestCollectorRegistration(t *testing.T) { + reg := prometheus.NewRegistry() + config := Config{ + MaxFailures: 5, + LeaseDuration: 15 * time.Second, + } + + for i := 0; i < 2; i++ { + sc := NewScheduler(config, nil, reg) + sc.queue.put(&raft_log.CompactionJobState{Name: "a"}) + sc.queue.put(&raft_log.CompactionJobState{ + Name: "b", CompactionLevel: 1, Token: 1, + Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, + }) + sc.queue.delete("a") + } +} diff --git a/pkg/experiment/metastore/compaction/scheduler/schedule.go b/pkg/experiment/metastore/compaction/scheduler/schedule.go new file mode 100644 index 0000000000..8773d6a469 --- /dev/null +++ b/pkg/experiment/metastore/compaction/scheduler/schedule.go @@ -0,0 +1,178 @@ +package scheduler + +import ( + "container/heap" + "slices" + "time" + + "go.etcd.io/bbolt" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log" +) + +// schedule should be used to prepare the compaction plan update. +// The implementation must have no side effects or alter the +// Scheduler in any way. +type schedule struct { + tx *bbolt.Tx + now time.Time + token uint64 + // Read-only. + scheduler *Scheduler + // Uncommitted schedule updates. + updates map[string]*raft_log.CompactionJobState + // Modified copy of the job queue. 
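+ // Each level is cloned lazily on first access (see queueLevelCopy), so the
+ // scheduler's own queue is never mutated while the schedule is prepared.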
+ copied []priorityJobQueue + level int +} + +func (p *schedule) AssignJob() (*raft_log.AssignedCompactionJob, error) { + state := p.nextAssignment() + if state == nil { + return nil, nil + } + plan, err := p.scheduler.store.GetJobPlan(p.tx, state.Name) + if err != nil { + return nil, err + } + p.updates[state.Name] = state + assigned := &raft_log.AssignedCompactionJob{ + State: state, + Plan: plan, + } + return assigned, nil +} + +func (p *schedule) UpdateJob(status *raft_log.CompactionJobStatusUpdate) *raft_log.CompactionJobState { + state := p.newStateForStatusReport(status) + if state == nil { + return nil + } + // State changes should be taken into account when we assign jobs. + p.updates[status.Name] = state + return state +} + +// handleStatusReport reports the job state change caused by the status report +// from compaction worker. The function does not modify the actual job queue. +func (p *schedule) newStateForStatusReport(status *raft_log.CompactionJobStatusUpdate) *raft_log.CompactionJobState { + state := p.scheduler.queue.jobs[status.Name] + if state == nil { + // This may happen if the job has been reassigned + // and completed by another worker; we respond in + // the same way. + return nil + } + + if state.Token > status.Token { + // The job is not assigned to this worker. + return nil + } + + switch newState := state.CloneVT(); status.Status { + case metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS: + // A regular lease renewal. + newState.LeaseExpiresAt = p.allocateLease() + return newState + + case metastorev1.CompactionJobStatus_COMPACTION_STATUS_SUCCESS: + newState.Status = status.Status + return newState + + default: + // Not allowed and unknown status updates can be safely ignored: + // eventually, the job will be reassigned. The same for status + // handlers: a nil state is returned, which is interpreted as + // "no new lease, stop the work". + } + + return nil +} + +// AddJob creates a state for the new plan. The method must be called +// after the last AssignJob and UpdateJob calls. +func (p *schedule) AddJob(plan *raft_log.CompactionJobPlan) *raft_log.CompactionJobState { + // TODO(kolesnikovae): Job queue size limit. + state := &raft_log.CompactionJobState{ + Name: plan.Name, + CompactionLevel: plan.CompactionLevel, + Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_UNSPECIFIED, + AddedAt: p.now.UnixNano(), + Token: p.token, + } + p.updates[state.Name] = state + return state +} + +func (p *schedule) nextAssignment() *raft_log.CompactionJobState { + // We don't need to check the job ownership here: the worker asks + // for a job assigment (new ownership). + for p.level < len(p.scheduler.queue.levels) { + pq := p.queueLevelCopy(p.level) + if pq.Len() == 0 { + p.level++ + continue + } + + job := heap.Pop(pq).(*jobEntry) + if _, found := p.updates[job.Name]; found { + // We don't even consider own jobs: these are already + // assigned and are in-progress or have been completed. + // This, however, does not prevent from reassigning a + // job that the worker has abandoned in the past. + // Newly created jobs are not considered here as well. + continue + } + + switch job.Status { + case metastorev1.CompactionJobStatus_COMPACTION_STATUS_UNSPECIFIED: + return p.assignJob(job) + + case metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS: + if p.shouldReassign(job) { + state := p.assignJob(job) + state.Failures++ + return state + } + } + + // If no jobs can be assigned at this level. 
+ p.level++ + } + + return nil +} + +func (p *schedule) allocateLease() int64 { + return p.now.Add(p.scheduler.config.LeaseDuration).UnixNano() +} + +func (p *schedule) assignJob(e *jobEntry) *raft_log.CompactionJobState { + job := e.CompactionJobState.CloneVT() + job.Status = metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS + job.LeaseExpiresAt = p.allocateLease() + job.Token = p.token + return job +} + +func (p *schedule) shouldReassign(job *jobEntry) bool { + abandoned := p.now.UnixNano() > job.LeaseExpiresAt + limit := p.scheduler.config.MaxFailures + faulty := limit > 0 && uint64(job.Failures) >= limit + return abandoned && !faulty +} + +// The queue must not be modified by the assigner. Therefore, we're copying the +// queue levels lazily. The queue is supposed to be small (hundreds of jobs +// running concurrently); in the worst case, we have a ~24b alloc per entry. +func (p *schedule) queueLevelCopy(i int) *priorityJobQueue { + s := i + 1 // Levels are 0-based. + if s > len(p.copied) { + p.copied = slices.Grow(p.copied, s)[:s] + if p.copied[i] == nil { + p.copied[i] = p.scheduler.queue.level(uint32(i)).clone() + } + } + return &p.copied[i] +} diff --git a/pkg/experiment/metastore/compaction/scheduler/schedule_test.go b/pkg/experiment/metastore/compaction/scheduler/schedule_test.go new file mode 100644 index 0000000000..6d24bca6e1 --- /dev/null +++ b/pkg/experiment/metastore/compaction/scheduler/schedule_test.go @@ -0,0 +1,321 @@ +package scheduler + +import ( + "testing" + "time" + + "github.com/hashicorp/raft" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log" + "github.com/grafana/pyroscope/pkg/test" + "github.com/grafana/pyroscope/pkg/test/mocks/mockscheduler" +) + +func TestSchedule_Update_LeaseRenewal(t *testing.T) { + store := new(mockscheduler.MockJobStore) + config := Config{ + MaxFailures: 3, + LeaseDuration: 10 * time.Second, + } + + scheduler := NewScheduler(config, store, nil) + scheduler.queue.put(&raft_log.CompactionJobState{ + Name: "1", + CompactionLevel: 0, + Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, + Token: 1, + LeaseExpiresAt: 0, + }) + + t.Run("Owner", test.AssertIdempotentSubtest(t, func(t *testing.T) { + s := scheduler.NewSchedule(nil, &raft.Log{Index: 1, AppendedAt: time.Unix(0, 0)}) + update := s.UpdateJob(&raft_log.CompactionJobStatusUpdate{ + Name: "1", + Token: 1, + Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, + }) + assert.Equal(t, &raft_log.CompactionJobState{ + Name: "1", + CompactionLevel: 0, + Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, + Token: 1, + LeaseExpiresAt: int64(config.LeaseDuration), + }, update) + })) + + t.Run("NotOwner", test.AssertIdempotentSubtest(t, func(t *testing.T) { + s := scheduler.NewSchedule(nil, &raft.Log{Index: 1, AppendedAt: time.Unix(0, 0)}) + assert.Nil(t, s.UpdateJob(&raft_log.CompactionJobStatusUpdate{ + Name: "1", + Token: 0, + Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, + })) + })) + + t.Run("JobCompleted", test.AssertIdempotentSubtest(t, func(t *testing.T) { + s := scheduler.NewSchedule(nil, &raft.Log{Index: 1, AppendedAt: time.Unix(0, 0)}) + assert.Nil(t, s.UpdateJob(&raft_log.CompactionJobStatusUpdate{ + Name: "0", + Token: 1, + Status: 
metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, + })) + })) + + t.Run("WrongStatus", test.AssertIdempotentSubtest(t, func(t *testing.T) { + s := scheduler.NewSchedule(nil, &raft.Log{Index: 1, AppendedAt: time.Unix(0, 0)}) + assert.Nil(t, s.UpdateJob(&raft_log.CompactionJobStatusUpdate{ + Name: "1", + Token: 1, + Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_UNSPECIFIED, + })) + })) +} + +func TestSchedule_Update_JobCompleted(t *testing.T) { + store := new(mockscheduler.MockJobStore) + config := Config{ + MaxFailures: 3, + LeaseDuration: 10 * time.Second, + } + + scheduler := NewScheduler(config, store, nil) + scheduler.queue.put(&raft_log.CompactionJobState{ + Name: "1", + CompactionLevel: 1, + Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, + Token: 1, + }) + + t.Run("Owner", test.AssertIdempotentSubtest(t, func(t *testing.T) { + s := scheduler.NewSchedule(nil, &raft.Log{Index: 1, AppendedAt: time.Unix(0, 0)}) + update := s.UpdateJob(&raft_log.CompactionJobStatusUpdate{ + Name: "1", + Token: 1, + Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_SUCCESS, + }) + assert.Equal(t, &raft_log.CompactionJobState{ + Name: "1", + CompactionLevel: 1, + Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_SUCCESS, + Token: 1, + }, update) + })) + + t.Run("NotOwner", test.AssertIdempotentSubtest(t, func(t *testing.T) { + s := scheduler.NewSchedule(nil, &raft.Log{Index: 1, AppendedAt: time.Unix(0, 0)}) + assert.Nil(t, s.UpdateJob(&raft_log.CompactionJobStatusUpdate{ + Name: "1", + Token: 0, + Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_SUCCESS, + })) + })) +} + +func TestSchedule_Assign(t *testing.T) { + store := new(mockscheduler.MockJobStore) + config := Config{ + MaxFailures: 3, + LeaseDuration: 10 * time.Second, + } + + scheduler := NewScheduler(config, store, nil) + plans := []*raft_log.CompactionJobPlan{ + {Name: "2", Tenant: "A", Shard: 1, CompactionLevel: 0, SourceBlocks: []string{"d", "e", "f"}}, + {Name: "3", Tenant: "A", Shard: 1, CompactionLevel: 0, SourceBlocks: []string{"j", "h", "i"}}, + {Name: "1", Tenant: "A", Shard: 1, CompactionLevel: 1, SourceBlocks: []string{"a", "b", "c"}}, + } + for _, p := range plans { + store.On("GetJobPlan", mock.Anything, p.Name).Return(p, nil) + } + + states := []*raft_log.CompactionJobState{ + {Name: "1", CompactionLevel: 1, Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_UNSPECIFIED}, + {Name: "2", CompactionLevel: 0, Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_UNSPECIFIED}, + {Name: "3", CompactionLevel: 0, Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_UNSPECIFIED}, + {Name: "4", CompactionLevel: 0, Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS}, + {Name: "5", CompactionLevel: 0, Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS}, + } + for _, s := range states { + scheduler.queue.put(s) + } + + test.AssertIdempotent(t, func(t *testing.T) { + s := scheduler.NewSchedule(nil, &raft.Log{Index: 1, AppendedAt: time.Unix(0, 0)}) + for j := range plans { + update, err := s.AssignJob() + require.NoError(t, err) + assert.Equal(t, plans[j], update.Plan) + assert.Equal(t, metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, update.State.Status) + assert.Equal(t, int64(config.LeaseDuration), update.State.LeaseExpiresAt) + assert.Equal(t, uint64(1), update.State.Token) + } + + update, err := s.AssignJob() + require.NoError(t, err) + assert.Nil(t, update) + }) +} + +func TestSchedule_ReAssign(t 
*testing.T) { + store := new(mockscheduler.MockJobStore) + config := Config{ + MaxFailures: 3, + LeaseDuration: 10 * time.Second, + } + + scheduler := NewScheduler(config, store, nil) + plans := []*raft_log.CompactionJobPlan{ + {Name: "1", Tenant: "A", Shard: 1, SourceBlocks: []string{"a", "b", "c"}}, + {Name: "2", Tenant: "A", Shard: 1, SourceBlocks: []string{"d", "e", "f"}}, + {Name: "3", Tenant: "A", Shard: 1, SourceBlocks: []string{"j", "h", "i"}}, + } + for _, p := range plans { + store.On("GetJobPlan", mock.Anything, p.Name).Return(p, nil) + } + + states := []*raft_log.CompactionJobState{ + {Name: "1", Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, Token: 1, LeaseExpiresAt: 0}, + {Name: "2", Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, Token: 1, LeaseExpiresAt: 0}, + {Name: "3", Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, Token: 1, LeaseExpiresAt: 0}, + } + for _, s := range states { + scheduler.queue.put(s) + } + + test.AssertIdempotent(t, func(t *testing.T) { + s := scheduler.NewSchedule(nil, &raft.Log{Index: 2, AppendedAt: time.Unix(0, 1)}) + for j := range plans { + update, err := s.AssignJob() + require.NoError(t, err) + assert.Equal(t, plans[j], update.Plan) + assert.Equal(t, metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, update.State.Status) + assert.Equal(t, int64(config.LeaseDuration)+1, update.State.LeaseExpiresAt) + assert.Equal(t, uint64(2), update.State.Token) + } + + update, err := s.AssignJob() + require.NoError(t, err) + assert.Nil(t, update) + }) +} + +func TestSchedule_UpdateAssign(t *testing.T) { + store := new(mockscheduler.MockJobStore) + config := Config{ + MaxFailures: 3, + LeaseDuration: 10 * time.Second, + } + + scheduler := NewScheduler(config, store, nil) + plans := []*raft_log.CompactionJobPlan{ + {Name: "1", Tenant: "A", Shard: 1, SourceBlocks: []string{"a", "b", "c"}}, + {Name: "2", Tenant: "A", Shard: 1, SourceBlocks: []string{"d", "e", "f"}}, + {Name: "3", Tenant: "A", Shard: 1, SourceBlocks: []string{"j", "h", "i"}}, + } + for _, p := range plans { + store.On("GetJobPlan", mock.Anything, p.Name).Return(p, nil) + } + + states := []*raft_log.CompactionJobState{ + {Name: "1", Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, Token: 1, LeaseExpiresAt: 0}, + {Name: "2", Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, Token: 1, LeaseExpiresAt: 0}, + {Name: "3", Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, Token: 1, LeaseExpiresAt: 0}, + } + for _, s := range states { + scheduler.queue.put(s) + } + + // Lease is extended without reassignment if update arrives after the + // expiration, but this is the first worker requested assignment. 
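+ // In other words: the leases above expired at t=0, the update arrives at
+ // t=20s, and no reassignment happened in between, so the original token (1)
+ // is honored and only the lease deadline is extended.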
+ test.AssertIdempotent(t, func(t *testing.T) { + updates := []*raft_log.CompactionJobStatusUpdate{ + {Name: "1", Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, Token: 1}, + {Name: "2", Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, Token: 1}, + {Name: "3", Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, Token: 1}, + } + + updatedAt := time.Second * 20 + s := scheduler.NewSchedule(nil, &raft.Log{Index: 2, AppendedAt: time.Unix(0, int64(updatedAt))}) + for i := range updates { + update := s.UpdateJob(updates[i]) + assert.Equal(t, metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, update.Status) + assert.Equal(t, int64(updatedAt)+int64(config.LeaseDuration), update.LeaseExpiresAt) + assert.Equal(t, uint64(1), update.Token) // Token must not change. + } + + update, err := s.AssignJob() + require.NoError(t, err) + assert.Nil(t, update) + }) + + // If the worker reports success status and its lease has expired but the + // job has not been reassigned, we accept the results. + test.AssertIdempotent(t, func(t *testing.T) { + updates := []*raft_log.CompactionJobStatusUpdate{ + {Name: "1", Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_SUCCESS, Token: 1}, + {Name: "2", Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_SUCCESS, Token: 1}, + {Name: "3", Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_SUCCESS, Token: 1}, + } + + updatedAt := time.Second * 20 + s := scheduler.NewSchedule(nil, &raft.Log{Index: 2, AppendedAt: time.Unix(0, int64(updatedAt))}) + for i := range updates { + assert.NotNil(t, s.UpdateJob(updates[i])) + } + + update, err := s.AssignJob() + require.NoError(t, err) + assert.Nil(t, update) + }) + + // The worker may be reassigned with the jobs it abandoned, + // if it requested assignments first. + test.AssertIdempotent(t, func(t *testing.T) { + updatedAt := time.Second * 20 + s := scheduler.NewSchedule(nil, &raft.Log{Index: 2, AppendedAt: time.Unix(0, int64(updatedAt))}) + for range plans { + update, err := s.AssignJob() + require.NoError(t, err) + assert.NotNil(t, update.State) + assert.NotNil(t, update.Plan) + assert.Equal(t, int64(updatedAt)+int64(config.LeaseDuration), update.State.LeaseExpiresAt) + assert.Equal(t, uint64(2), update.State.Token) // Token must change. 
+ } + + update, err := s.AssignJob() + require.NoError(t, err) + assert.Nil(t, update) + }) +} + +func TestSchedule_Add(t *testing.T) { + store := new(mockscheduler.MockJobStore) + config := Config{ + MaxFailures: 3, + LeaseDuration: 10 * time.Second, + } + + scheduler := NewScheduler(config, store, nil) + plans := []*raft_log.CompactionJobPlan{ + {Name: "1", Tenant: "A", Shard: 1, SourceBlocks: []string{"a", "b", "c"}}, + {Name: "2", Tenant: "A", Shard: 1, SourceBlocks: []string{"d", "e", "f"}}, + {Name: "3", Tenant: "A", Shard: 1, SourceBlocks: []string{"j", "h", "i"}}, + } + + states := []*raft_log.CompactionJobState{ + {Name: "1", Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_UNSPECIFIED, AddedAt: 1, Token: 1}, + {Name: "2", Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_UNSPECIFIED, AddedAt: 1, Token: 1}, + {Name: "3", Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_UNSPECIFIED, AddedAt: 1, Token: 1}, + } + + test.AssertIdempotent(t, func(t *testing.T) { + s := scheduler.NewSchedule(nil, &raft.Log{Index: 1, AppendedAt: time.Unix(0, 1)}) + for i := range plans { + assert.Equal(t, states[i], s.AddJob(plans[i])) + } + }) +} diff --git a/pkg/experiment/metastore/compaction/scheduler/scheduler.go b/pkg/experiment/metastore/compaction/scheduler/scheduler.go new file mode 100644 index 0000000000..ee5c175146 --- /dev/null +++ b/pkg/experiment/metastore/compaction/scheduler/scheduler.go @@ -0,0 +1,153 @@ +package scheduler + +import ( + "flag" + "sync" + "time" + + "github.com/hashicorp/raft" + "github.com/prometheus/client_golang/prometheus" + "go.etcd.io/bbolt" + + "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log" + "github.com/grafana/pyroscope/pkg/experiment/metastore/compaction" + "github.com/grafana/pyroscope/pkg/experiment/metastore/compaction/scheduler/store" + "github.com/grafana/pyroscope/pkg/iter" + "github.com/grafana/pyroscope/pkg/util" +) + +var _ compaction.Scheduler = (*Scheduler)(nil) + +// Compaction job scheduler. Jobs are prioritized by the compaction level, and +// the deadline time. +// +// Compaction workers own jobs while they are in progress. Ownership handling is +// implemented using lease deadlines and fencing tokens: +// https://martin.kleppmann.com/2016/02/08/how-to-do-distributed-locking.html + +// JobStore does not really store jobs as they are: it explicitly +// distinguishes between the job and the job state. +// +// Implementation note: block metadata should never be stored in StoreJob: +// those are already stored in the metadata index. 
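+//
+// In this implementation, a job plan is written once when the job is created
+// and read back on assignment, while the job state is rewritten on every
+// status change (see Scheduler.UpdateSchedule).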
+type JobStore interface { + StoreJobPlan(*bbolt.Tx, *raft_log.CompactionJobPlan) error + GetJobPlan(tx *bbolt.Tx, name string) (*raft_log.CompactionJobPlan, error) + DeleteJobPlan(tx *bbolt.Tx, name string) error + + StoreJobState(*bbolt.Tx, *raft_log.CompactionJobState) error + DeleteJobState(tx *bbolt.Tx, name string) error + ListEntries(*bbolt.Tx) iter.Iterator[*raft_log.CompactionJobState] + + CreateBuckets(*bbolt.Tx) error +} + +type Config struct { + MaxFailures uint64 `yaml:"compaction_max_failures" doc:""` + LeaseDuration time.Duration `yaml:"compaction_job_lease_duration" doc:""` +} + +func (c *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.Uint64Var(&c.MaxFailures, prefix+"compaction-max-failures", 3, "") + f.DurationVar(&c.LeaseDuration, prefix+"compaction-job-lease-duration", 15*time.Second, "") +} + +type Scheduler struct { + config Config + store JobStore + // Although the job queue is only accessed for writes + // synchronously, the mutex is needed to collect stats. + mu sync.Mutex + queue *schedulerQueue +} + +// NewScheduler creates a scheduler with the given lease duration. +// Typically, callers should update jobs at the interval not exceeding +// the half of the lease duration. +func NewScheduler(config Config, store JobStore, reg prometheus.Registerer) *Scheduler { + s := &Scheduler{ + config: config, + store: store, + queue: newJobQueue(), + } + collector := newStatsCollector(s) + util.RegisterOrGet(reg, collector) + return s +} + +func NewStore() *store.JobStore { + return store.NewJobStore() +} + +func (sc *Scheduler) NewSchedule(tx *bbolt.Tx, cmd *raft.Log) compaction.Schedule { + return &schedule{ + tx: tx, + token: cmd.Index, + now: cmd.AppendedAt, + scheduler: sc, + updates: make(map[string]*raft_log.CompactionJobState), + } +} + +func (sc *Scheduler) UpdateSchedule(tx *bbolt.Tx, _ *raft.Log, update *raft_log.CompactionPlanUpdate) error { + sc.mu.Lock() + defer sc.mu.Unlock() + + for _, job := range update.NewJobs { + if err := sc.store.StoreJobPlan(tx, job.Plan); err != nil { + return err + } + if err := sc.store.StoreJobState(tx, job.State); err != nil { + return err + } + sc.queue.put(job.State) + } + + for _, job := range update.UpdatedJobs { + if err := sc.store.StoreJobState(tx, job.State); err != nil { + return err + } + sc.queue.put(job.State) + } + + for _, job := range update.AssignedJobs { + if err := sc.store.StoreJobState(tx, job.State); err != nil { + return err + } + sc.queue.put(job.State) + } + + for _, job := range update.CompletedJobs { + name := job.State.Name + if err := sc.store.DeleteJobPlan(tx, name); err != nil { + return err + } + if err := sc.store.DeleteJobState(tx, name); err != nil { + return err + } + sc.queue.delete(name) + } + + return nil +} + +func (sc *Scheduler) Init(tx *bbolt.Tx) error { + return sc.store.CreateBuckets(tx) +} + +func (sc *Scheduler) Restore(tx *bbolt.Tx) error { + sc.mu.Lock() + defer sc.mu.Unlock() + // Reset in-memory state before loading entries from the store. + sc.queue.reset() + entries := sc.store.ListEntries(tx) + defer func() { + _ = entries.Close() + }() + for entries.Next() { + sc.queue.put(entries.At()) + } + // Zero all stats updated during Restore. 
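+ // (queue.put above increments the "added" counter for every restored entry;
+ // without the reset, a restart would inflate the totals.)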
+ sc.queue.resetStats() + return entries.Err() +} diff --git a/pkg/experiment/metastore/compaction/scheduler/scheduler_queue.go b/pkg/experiment/metastore/compaction/scheduler/scheduler_queue.go new file mode 100644 index 0000000000..a3cf294cc1 --- /dev/null +++ b/pkg/experiment/metastore/compaction/scheduler/scheduler_queue.go @@ -0,0 +1,186 @@ +package scheduler + +import ( + "container/heap" + "slices" + "strings" + + "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log" +) + +type schedulerQueue struct { + jobs map[string]*jobEntry + levels []*jobQueue +} + +func newJobQueue() *schedulerQueue { + return &schedulerQueue{ + jobs: make(map[string]*jobEntry), + } +} + +func (q *schedulerQueue) reset() { + clear(q.jobs) + clear(q.levels) + q.levels = q.levels[:0] +} + +func (q *schedulerQueue) put(state *raft_log.CompactionJobState) { + job, exists := q.jobs[state.Name] + level := q.level(state.CompactionLevel) + if exists { + level.update(job, state) + return + } + e := &jobEntry{CompactionJobState: state} + q.jobs[state.Name] = e + level.add(e) +} + +func (q *schedulerQueue) delete(name string) *raft_log.CompactionJobState { + if e, exists := q.jobs[name]; exists { + delete(q.jobs, name) + return q.level(e.CompactionLevel).delete(e) + } + return nil +} + +func (q *schedulerQueue) level(x uint32) *jobQueue { + s := x + 1 // Levels are 0-based. + if s >= uint32(len(q.levels)) { + q.levels = slices.Grow(q.levels, int(s))[:s] + } + level := q.levels[x] + if level == nil { + level = &jobQueue{ + jobs: new(priorityJobQueue), + stats: new(queueStats), + } + q.levels[x] = level + } + return level +} + +func (q *schedulerQueue) resetStats() { + for _, level := range q.levels { + if level != nil { + level.stats.reset() + } + } +} + +type jobQueue struct { + jobs *priorityJobQueue + stats *queueStats +} + +type queueStats struct { + // Counters. Updated on access. + addedTotal uint32 + completedTotal uint32 + assignedTotal uint32 + reassignedTotal uint32 + // Gauges. Updated periodically. + assigned uint32 + unassigned uint32 + reassigned uint32 + failed uint32 +} + +func (s *queueStats) reset() { + *s = queueStats{} +} + +type jobEntry struct { + index int // The index of the job in the heap. + *raft_log.CompactionJobState +} + +func (q *jobQueue) add(e *jobEntry) { + q.stats.addedTotal++ + heap.Push(q.jobs, e) +} + +func (q *jobQueue) update(e *jobEntry, state *raft_log.CompactionJobState) { + if e.Status == 0 && state.Status != 0 { + // Job given a status. + q.stats.assignedTotal++ + } + if e.Status != 0 && e.Token != state.Token { + // Token change. + q.stats.reassignedTotal++ + } + e.CompactionJobState = state + heap.Fix(q.jobs, e.index) + return +} + +func (q *jobQueue) delete(e *jobEntry) *raft_log.CompactionJobState { + q.stats.completedTotal++ + heap.Remove(q.jobs, e.index) + return e.CompactionJobState +} + +func (q *jobQueue) clone() priorityJobQueue { + c := make(priorityJobQueue, q.jobs.Len()) + for j, job := range *q.jobs { + jobCopy := *job + c[j] = &jobCopy + } + return c +} + +// The function determines the scheduling order of the jobs. +func compareJobs(a, b *jobEntry) int { + // Pick jobs in the "initial" (unspecified) state first. + if a.Status != b.Status { + return int(a.Status) - int(b.Status) + } + // Faulty jobs should wait. + if a.Failures != b.Failures { + return int(a.Failures) - int(b.Failures) + } + // Jobs with earlier deadlines should go first. + // A job that has been just added has no lease + // and will always go first. 
+ if a.LeaseExpiresAt != b.LeaseExpiresAt { + return int(a.LeaseExpiresAt) - int(b.LeaseExpiresAt) + } + // Tiebreaker: the job name must not bias the order. + return strings.Compare(a.Name, b.Name) +} + +// TODO(kolesnikovae): container/heap is not very efficient, +// consider implementing own heap, specific to the case. +// A treap might be suitable as well. + +type priorityJobQueue []*jobEntry + +func (pq priorityJobQueue) Len() int { return len(pq) } + +func (pq priorityJobQueue) Less(i, j int) bool { + return compareJobs(pq[i], pq[j]) < 0 +} + +func (pq priorityJobQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] + pq[i].index = i + pq[j].index = j +} + +func (pq *priorityJobQueue) Push(x interface{}) { + n := len(*pq) + job := x.(*jobEntry) + job.index = n + *pq = append(*pq, job) +} + +func (pq *priorityJobQueue) Pop() interface{} { + old := *pq + n := len(old) + job := old[n-1] + old[n-1] = nil + job.index = -1 + *pq = old[0 : n-1] + return job +} diff --git a/pkg/experiment/metastore/compaction/scheduler/scheduler_queue_test.go b/pkg/experiment/metastore/compaction/scheduler/scheduler_queue_test.go new file mode 100644 index 0000000000..09fae32f70 --- /dev/null +++ b/pkg/experiment/metastore/compaction/scheduler/scheduler_queue_test.go @@ -0,0 +1,151 @@ +package scheduler + +import ( + "container/heap" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log" +) + +func TestJobQueue_order(t *testing.T) { + items := []*raft_log.CompactionJobState{ + { + Name: "job-6", + CompactionLevel: 0, + Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_UNSPECIFIED, + LeaseExpiresAt: 5, + Failures: 0, + }, + { + Name: "job-0", + CompactionLevel: 1, + Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_UNSPECIFIED, + LeaseExpiresAt: 2, + Failures: 0, + }, + { + Name: "job-1", + CompactionLevel: 1, + Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_UNSPECIFIED, + LeaseExpiresAt: 2, + Failures: 0, + }, + { + Name: "job-2", + CompactionLevel: 0, + Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, + LeaseExpiresAt: 2, + Failures: 0, + }, + { + Name: "job-5", + CompactionLevel: 1, + Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_UNSPECIFIED, + LeaseExpiresAt: 3, + Failures: 0, + }, + { + Name: "job-3", + CompactionLevel: 0, + Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, + LeaseExpiresAt: 2, + Failures: 5, + }, + { + Name: "job-4", + CompactionLevel: 0, + Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, + LeaseExpiresAt: 1, + Failures: 5, + }, + } + + test := func(items []*raft_log.CompactionJobState) { + q := newJobQueue() + for _, item := range items { + q.put(item) + } + + j3 := q.delete("job-1") + j1 := q.delete("job-3") + jx := q.delete("job-x") + assert.Nil(t, jx) + + q.put(j1) + q.put(j3) + q.put(j3) + q.put(j1) + + q.put(&raft_log.CompactionJobState{ + Name: "job-4", + CompactionLevel: 0, + Status: metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS, + LeaseExpiresAt: 3, // Should be after job-3. 
+ Failures: 5, + }) + + expected := []string{"job-6", "job-2", "job-3", "job-4", "job-0", "job-1", "job-5"} + dequeued := make([]string, 0, len(items)) + for range items { + x := jobQueuePop(q) + assert.NotNil(t, x) + dequeued = append(dequeued, x.Name) + } + assert.Equal(t, expected, dequeued) + assert.Nil(t, jobQueuePop(q)) + } + + rnd := rand.New(rand.NewSource(123)) + for i := 0; i < 25; i++ { + rnd.Shuffle(len(items), func(i, j int) { + items[i], items[j] = items[j], items[i] + }) + test(items) + } +} + +func TestJobQueue_delete(t *testing.T) { + q := newJobQueue() + items := []*raft_log.CompactionJobState{ + {Name: "job-1"}, + {Name: "job-2"}, + {Name: "job-3"}, + {Name: "job-4"}, + } + + for _, item := range items { + q.put(item) + } + + for _, item := range items { + q.delete(item.Name) + } + + assert.Nil(t, jobQueuePop(q)) +} + +func TestJobQueue_empty(t *testing.T) { + q := newJobQueue() + q.delete("job-1") + assert.Nil(t, jobQueuePop(q)) + q.put(&raft_log.CompactionJobState{Name: "job-1"}) + q.delete("job-1") + assert.Nil(t, jobQueuePop(q)) +} + +// The function is for testing purposes only. +func jobQueuePop(q *schedulerQueue) *raft_log.CompactionJobState { + for i := range q.levels { + level := q.level(uint32(i)) + if level.jobs.Len() > 0 { + x := heap.Pop(level.jobs).(*jobEntry).CompactionJobState + delete(q.jobs, x.Name) + return x + } + } + return nil +} diff --git a/pkg/experiment/metastore/compaction/scheduler/scheduler_test.go b/pkg/experiment/metastore/compaction/scheduler/scheduler_test.go new file mode 100644 index 0000000000..c104eff0ed --- /dev/null +++ b/pkg/experiment/metastore/compaction/scheduler/scheduler_test.go @@ -0,0 +1,89 @@ +package scheduler + +import ( + "testing" + + "github.com/hashicorp/raft" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log" + "github.com/grafana/pyroscope/pkg/iter" + "github.com/grafana/pyroscope/pkg/test/mocks/mockscheduler" +) + +func TestScheduler_UpdateSchedule(t *testing.T) { + store := new(mockscheduler.MockJobStore) + store.On("StoreJobPlan", mock.Anything, &raft_log.CompactionJobPlan{Name: "1"}).Return(nil).Once() + store.On("StoreJobState", mock.Anything, &raft_log.CompactionJobState{Name: "1"}).Return(nil).Once() + store.On("StoreJobState", mock.Anything, &raft_log.CompactionJobState{Name: "2"}).Return(nil).Once() + store.On("DeleteJobPlan", mock.Anything, "3").Return(nil).Once() + store.On("DeleteJobState", mock.Anything, "3").Return(nil).Once() + + scheduler := NewScheduler(Config{}, store, nil) + scheduler.queue.put(&raft_log.CompactionJobState{Name: "1", Token: 1}) + scheduler.queue.put(&raft_log.CompactionJobState{Name: "2", Token: 1}) + scheduler.queue.put(&raft_log.CompactionJobState{Name: "3", Token: 1}) + + err := scheduler.UpdateSchedule(nil, &raft.Log{Index: 2}, &raft_log.CompactionPlanUpdate{ + NewJobs: []*raft_log.NewCompactionJob{{ + State: &raft_log.CompactionJobState{Name: "1"}, + Plan: &raft_log.CompactionJobPlan{Name: "1"}, + }}, + UpdatedJobs: []*raft_log.UpdatedCompactionJob{{ + State: &raft_log.CompactionJobState{Name: "2"}, + }}, + CompletedJobs: []*raft_log.CompletedCompactionJob{{ + State: &raft_log.CompactionJobState{Name: "3"}, + }}, + }) + + require.NoError(t, err) + s := scheduler.NewSchedule(nil, &raft.Log{Index: 3}) + + store.On("GetJobPlan", mock.Anything, "1").Return(new(raft_log.CompactionJobPlan), nil).Once() + assigment, err := s.AssignJob() + 
require.NoError(t, err) + assert.NotNil(t, assigment) + + store.On("GetJobPlan", mock.Anything, "2").Return(new(raft_log.CompactionJobPlan), nil).Once() + assigment, err = s.AssignJob() + require.NoError(t, err) + assert.NotNil(t, assigment) + + assigment, err = s.AssignJob() + require.NoError(t, err) + assert.Nil(t, assigment) + + store.AssertExpectations(t) +} + +func TestScheduler_Restore(t *testing.T) { + store := new(mockscheduler.MockJobStore) + scheduler := NewScheduler(Config{}, store, nil) + + store.On("ListEntries", mock.Anything).Return(iter.NewSliceIterator([]*raft_log.CompactionJobState{ + {Name: "1", Token: 1}, + {Name: "2", Token: 1}, + })) + + require.NoError(t, scheduler.Restore(nil)) + s := scheduler.NewSchedule(nil, &raft.Log{Index: 3}) + + store.On("GetJobPlan", mock.Anything, "1").Return(new(raft_log.CompactionJobPlan), nil).Once() + assigment, err := s.AssignJob() + require.NoError(t, err) + assert.NotNil(t, assigment) + + store.On("GetJobPlan", mock.Anything, "2").Return(new(raft_log.CompactionJobPlan), nil).Once() + assigment, err = s.AssignJob() + require.NoError(t, err) + assert.NotNil(t, assigment) + + assigment, err = s.AssignJob() + require.NoError(t, err) + assert.Nil(t, assigment) + + store.AssertExpectations(t) +} diff --git a/pkg/experiment/metastore/compaction/scheduler/store/job_plan_store.go b/pkg/experiment/metastore/compaction/scheduler/store/job_plan_store.go new file mode 100644 index 0000000000..95799433d9 --- /dev/null +++ b/pkg/experiment/metastore/compaction/scheduler/store/job_plan_store.go @@ -0,0 +1,47 @@ +package store + +import ( + "errors" + "fmt" + + "go.etcd.io/bbolt" + + "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log" + "github.com/grafana/pyroscope/pkg/experiment/metastore/store" +) + +var jobPlanBucketName = []byte("compaction_job_plan") + +var ErrInvalidJobPlan = errors.New("invalid job plan entry") + +type JobPlanStore struct{ bucketName []byte } + +func NewJobPlanStore() *JobPlanStore { + return &JobPlanStore{bucketName: jobPlanBucketName} +} + +func (s JobPlanStore) CreateBuckets(tx *bbolt.Tx) error { + _, err := tx.CreateBucketIfNotExists(s.bucketName) + return err +} + +func (s JobPlanStore) StoreJobPlan(tx *bbolt.Tx, plan *raft_log.CompactionJobPlan) error { + v, _ := plan.MarshalVT() + return tx.Bucket(s.bucketName).Put([]byte(plan.Name), v) +} + +func (s JobPlanStore) GetJobPlan(tx *bbolt.Tx, name string) (*raft_log.CompactionJobPlan, error) { + b := tx.Bucket(s.bucketName).Get([]byte(name)) + if b == nil { + return nil, fmt.Errorf("loading job plan %s: %w", name, store.ErrorNotFound) + } + var v raft_log.CompactionJobPlan + if err := v.UnmarshalVT(b); err != nil { + return nil, fmt.Errorf("%w: %v", ErrInvalidJobPlan, err) + } + return &v, nil +} + +func (s JobPlanStore) DeleteJobPlan(tx *bbolt.Tx, name string) error { + return tx.Bucket(s.bucketName).Delete([]byte(name)) +} diff --git a/pkg/experiment/metastore/compaction/scheduler/store/job_plan_store_test.go b/pkg/experiment/metastore/compaction/scheduler/store/job_plan_store_test.go new file mode 100644 index 0000000000..10b90a5b91 --- /dev/null +++ b/pkg/experiment/metastore/compaction/scheduler/store/job_plan_store_test.go @@ -0,0 +1,46 @@ +package store + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log" + "github.com/grafana/pyroscope/pkg/experiment/metastore/store" + "github.com/grafana/pyroscope/pkg/test" +) + +func 
TestJobPlanStore(t *testing.T) { + db := test.BoltDB(t) + + s := NewJobStore() + tx, err := db.Begin(true) + require.NoError(t, err) + require.NoError(t, s.CreateBuckets(tx)) + assert.NoError(t, s.StoreJobPlan(tx, &raft_log.CompactionJobPlan{Name: "1"})) + require.NoError(t, tx.Commit()) + + s = NewJobStore() + tx, err = db.Begin(false) + require.NoError(t, err) + state, err := s.GetJobPlan(tx, "2") + require.ErrorIs(t, err, store.ErrorNotFound) + require.Nil(t, state) + state, err = s.GetJobPlan(tx, "1") + require.NoError(t, err) + assert.Equal(t, "1", state.Name) + require.NoError(t, tx.Rollback()) + + tx, err = db.Begin(true) + require.NoError(t, err) + require.NoError(t, s.DeleteJobPlan(tx, "1")) + require.NoError(t, tx.Commit()) + + tx, err = db.Begin(false) + require.NoError(t, err) + state, err = s.GetJobPlan(tx, "1") + require.ErrorIs(t, err, store.ErrorNotFound) + require.Nil(t, state) + require.NoError(t, tx.Rollback()) +} diff --git a/pkg/experiment/metastore/compaction/scheduler/store/job_state_store.go b/pkg/experiment/metastore/compaction/scheduler/store/job_state_store.go new file mode 100644 index 0000000000..183ac8b378 --- /dev/null +++ b/pkg/experiment/metastore/compaction/scheduler/store/job_state_store.go @@ -0,0 +1,88 @@ +package store + +import ( + "errors" + "fmt" + + "go.etcd.io/bbolt" + + "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log" + "github.com/grafana/pyroscope/pkg/experiment/metastore/store" + "github.com/grafana/pyroscope/pkg/iter" +) + +var jobStateBucketName = []byte("compaction_job_state") + +var ErrInvalidJobState = errors.New("invalid job state entry") + +type JobStateStore struct{ bucketName []byte } + +func NewJobStateStore() *JobStateStore { + return &JobStateStore{bucketName: jobStateBucketName} +} + +func (s JobStateStore) CreateBuckets(tx *bbolt.Tx) error { + _, err := tx.CreateBucketIfNotExists(s.bucketName) + return err +} + +func (s JobStateStore) GetJobState(tx *bbolt.Tx, name string) (*raft_log.CompactionJobState, error) { + b := tx.Bucket(s.bucketName).Get([]byte(name)) + if b == nil { + return nil, fmt.Errorf("loading job state %s: %w", name, store.ErrorNotFound) + } + var v raft_log.CompactionJobState + if err := v.UnmarshalVT(b); err != nil { + return nil, fmt.Errorf("%w: %v", ErrInvalidJobState, err) + } + return &v, nil +} + +func (s JobStateStore) StoreJobState(tx *bbolt.Tx, state *raft_log.CompactionJobState) error { + v, _ := state.MarshalVT() + return tx.Bucket(s.bucketName).Put([]byte(state.Name), v) +} + +func (s JobStateStore) DeleteJobState(tx *bbolt.Tx, name string) error { + return tx.Bucket(s.bucketName).Delete([]byte(name)) +} + +func (s JobStateStore) ListEntries(tx *bbolt.Tx) iter.Iterator[*raft_log.CompactionJobState] { + return newJobEntriesIterator(tx.Bucket(s.bucketName)) +} + +type jobEntriesIterator struct { + iter *store.CursorIterator + cur *raft_log.CompactionJobState + err error +} + +func newJobEntriesIterator(bucket *bbolt.Bucket) *jobEntriesIterator { + return &jobEntriesIterator{iter: store.NewCursorIter(nil, bucket.Cursor())} +} + +func (x *jobEntriesIterator) Next() bool { + if x.err != nil || !x.iter.Next() { + return false + } + e := x.iter.At() + var s raft_log.CompactionJobState + x.err = s.UnmarshalVT(e.Value) + if x.err != nil { + x.err = fmt.Errorf("%w: %v", ErrInvalidJobState, x.err) + return false + } + x.cur = &s + return true +} + +func (x *jobEntriesIterator) At() *raft_log.CompactionJobState { return x.cur } + +func (x *jobEntriesIterator) Close() error { return 
x.iter.Close() } + +func (x *jobEntriesIterator) Err() error { + if err := x.iter.Err(); err != nil { + return err + } + return x.err +} diff --git a/pkg/experiment/metastore/compaction/scheduler/store/job_state_store_test.go b/pkg/experiment/metastore/compaction/scheduler/store/job_state_store_test.go new file mode 100644 index 0000000000..0216ebb534 --- /dev/null +++ b/pkg/experiment/metastore/compaction/scheduler/store/job_state_store_test.go @@ -0,0 +1,51 @@ +package store + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log" + "github.com/grafana/pyroscope/pkg/experiment/metastore/store" + "github.com/grafana/pyroscope/pkg/test" +) + +func TestJobStateStore(t *testing.T) { + db := test.BoltDB(t) + + s := NewJobStore() + tx, err := db.Begin(true) + require.NoError(t, err) + require.NoError(t, s.CreateBuckets(tx)) + assert.NoError(t, s.StoreJobState(tx, &raft_log.CompactionJobState{Name: "1"})) + assert.NoError(t, s.StoreJobState(tx, &raft_log.CompactionJobState{Name: "2"})) + assert.NoError(t, s.StoreJobState(tx, &raft_log.CompactionJobState{Name: "3"})) + require.NoError(t, tx.Commit()) + + s = NewJobStore() + tx, err = db.Begin(true) + require.NoError(t, err) + state, err := s.GetJobState(tx, "2") + require.NoError(t, err) + assert.Equal(t, "2", state.Name) + require.NoError(t, s.DeleteJobState(tx, "2")) + state, err = s.GetJobState(tx, "2") + require.ErrorIs(t, err, store.ErrorNotFound) + require.Nil(t, state) + require.NoError(t, tx.Commit()) + + tx, err = db.Begin(true) + require.NoError(t, err) + + iter := s.ListEntries(tx) + expected := []string{"1", "3"} + var i int + for iter.Next() { + assert.Equal(t, expected[i], iter.At().Name) + i++ + } + assert.Nil(t, iter.Err()) + assert.Nil(t, iter.Close()) + require.NoError(t, tx.Rollback()) +} diff --git a/pkg/experiment/metastore/compaction/scheduler/store/job_store.go b/pkg/experiment/metastore/compaction/scheduler/store/job_store.go new file mode 100644 index 0000000000..865b33a6b1 --- /dev/null +++ b/pkg/experiment/metastore/compaction/scheduler/store/job_store.go @@ -0,0 +1,27 @@ +package store + +import ( + "go.etcd.io/bbolt" +) + +type JobStore struct { + *JobStateStore + *JobPlanStore +} + +func NewJobStore() *JobStore { + return &JobStore{ + JobStateStore: NewJobStateStore(), + JobPlanStore: NewJobPlanStore(), + } +} + +func (s JobStore) CreateBuckets(tx *bbolt.Tx) error { + if err := s.JobStateStore.CreateBuckets(tx); err != nil { + return err + } + if err := s.JobPlanStore.CreateBuckets(tx); err != nil { + return err + } + return nil +} diff --git a/pkg/experiment/metastore/compaction_planner.go b/pkg/experiment/metastore/compaction_planner.go deleted file mode 100644 index 96fd25e1ca..0000000000 --- a/pkg/experiment/metastore/compaction_planner.go +++ /dev/null @@ -1,115 +0,0 @@ -package metastore - -import ( - "flag" - "time" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/grafana/pyroscope/pkg/util" -) - -type CompactionConfig struct { - JobLeaseDuration time.Duration `yaml:"job_lease_duration"` - JobMaxFailures int `yaml:"job_max_failures"` -} - -func (cfg *CompactionConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { - f.DurationVar(&cfg.JobLeaseDuration, prefix+"job-lease-duration", 15*time.Second, "") - f.IntVar(&cfg.JobMaxFailures, prefix+"job-max-failures", 3, "") -} - -var ( - // TODO aleks: for illustration purposes, to be moved externally - 
globalCompactionStrategy = compactionStrategy{ - levels: map[uint32]compactionLevelStrategy{ - 0: {maxBlocks: 20}, - }, - defaultStrategy: compactionLevelStrategy{ - maxBlocks: 10, - }, - maxCompactionLevel: 3, - // 0: 0.5 - // 1: 10s - // 2: 100s - // 3: 1000s // 16m40s - } -) - -type compactionStrategy struct { - levels map[uint32]compactionLevelStrategy - defaultStrategy compactionLevelStrategy - maxCompactionLevel uint32 -} - -type compactionLevelStrategy struct { - maxBlocks int - maxTotalSizeBytes uint64 -} - -func getStrategyForLevel(compactionLevel uint32) compactionLevelStrategy { - strategy, ok := globalCompactionStrategy.levels[compactionLevel] - if !ok { - strategy = globalCompactionStrategy.defaultStrategy - } - return strategy -} - -func (s compactionLevelStrategy) shouldCreateJob(blocks []string) bool { - return len(blocks) >= s.maxBlocks -} - -type compactionMetrics struct { - addedBlocks *prometheus.CounterVec - deletedBlocks *prometheus.CounterVec - addedJobs *prometheus.CounterVec - assignedJobs *prometheus.CounterVec - completedJobs *prometheus.CounterVec - retriedJobs *prometheus.CounterVec - discardedJobs *prometheus.CounterVec -} - -func newCompactionMetrics(reg prometheus.Registerer) *compactionMetrics { - m := &compactionMetrics{ - addedBlocks: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "compaction_added_blocks_count", - Help: "The number of blocks added for compaction", - }, []string{"shard", "tenant", "level"}), - deletedBlocks: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "compaction_deleted_blocks_count", - Help: "The number of blocks deleted as a result of compaction", - }, []string{"shard", "tenant", "level"}), - addedJobs: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "compaction_added_jobs_count", - Help: "The number of created compaction jobs", - }, []string{"shard", "tenant", "level"}), - assignedJobs: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "compaction_assigned_jobs_count", - Help: "The number of assigned compaction jobs", - }, []string{"shard", "tenant", "level"}), - completedJobs: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "compaction_completed_jobs_count", - Help: "The number of completed compaction jobs", - }, []string{"shard", "tenant", "level"}), - retriedJobs: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "compaction_retried_jobs_count", - Help: "The number of retried compaction jobs", - }, []string{"shard", "tenant", "level"}), - discardedJobs: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "compaction_discarded_jobs_count", - Help: "The number of discarded compaction jobs", - }, []string{"shard", "tenant", "level"}), - } - if reg != nil { - util.Register(reg, - m.addedBlocks, - m.deletedBlocks, - m.addedJobs, - m.assignedJobs, - m.completedJobs, - m.retriedJobs, - m.discardedJobs, - ) - } - return m -} diff --git a/pkg/experiment/metastore/compaction_queue.go b/pkg/experiment/metastore/compaction_queue.go deleted file mode 100644 index a3bc2bec8d..0000000000 --- a/pkg/experiment/metastore/compaction_queue.go +++ /dev/null @@ -1,225 +0,0 @@ -package metastore - -import ( - "container/heap" - "slices" - "sync" - - "github.com/grafana/pyroscope/pkg/experiment/metastore/compactionpb" -) - -// A priority queue for compaction jobs. Jobs are prioritized by the compaction -// level, and the deadline time. -// -// The queue is supposed to be used by the compaction planner to schedule jobs. -// -// Compaction workers own jobs while they are in progress. 
Ownership handling is -// implemented using lease deadlines and fencing tokens: -// https://martin.kleppmann.com/2016/02/08/how-to-do-distributed-locking.html - -type jobQueue struct { - mu sync.Mutex - jobs map[string]*jobQueueEntry - pq priorityQueue - - lease int64 -} - -// newJobQueue creates a new job queue with the given lease duration. -// -// Typically, callers should update jobs at the interval not exceeding -// the half of the lease duration. -func newJobQueue(lease int64) *jobQueue { - pq := make(priorityQueue, 0) - heap.Init(&pq) - return &jobQueue{ - jobs: make(map[string]*jobQueueEntry), - pq: pq, - lease: lease, - } -} - -type jobQueueEntry struct { - // The index of the job in the heap. - index int - // The original proto message. - *compactionpb.CompactionJob -} - -func (c *jobQueueEntry) less(x *jobQueueEntry) bool { - if c.Status != x.Status { - // Pick jobs in the "initial" (unspecified) state first. - return c.Status < x.Status - } - if c.CompactionLevel != x.CompactionLevel { - // Compact lower level jobs first. - return c.CompactionLevel < x.CompactionLevel - } - if c.LeaseExpiresAt != x.LeaseExpiresAt { - // Jobs with earlier deadlines should be at the top. - return c.LeaseExpiresAt < x.LeaseExpiresAt - } - - return c.Name < x.Name -} - -func (q *jobQueue) dequeue(now int64, raftLogIndex uint64) *compactionpb.CompactionJob { - q.mu.Lock() - defer q.mu.Unlock() - for q.pq.Len() > 0 { - job := q.pq[0] - if job.Status == compactionpb.CompactionStatus_COMPACTION_STATUS_IN_PROGRESS && - now <= job.LeaseExpiresAt { - // If the top job is in progress and not expired, stop checking further - return nil - } - if job.Status == compactionpb.CompactionStatus_COMPACTION_STATUS_CANCELLED { - // if we've reached cancelled jobs in the queue we have no work left - return nil - } - // Actually remove it from the heap, update and push it back. - heap.Pop(&q.pq) - job.LeaseExpiresAt = q.getNewDeadline(now) - job.Status = compactionpb.CompactionStatus_COMPACTION_STATUS_IN_PROGRESS - // If job.status is "in progress", the ownership of the job is being revoked. - job.RaftLogIndex = raftLogIndex - heap.Push(&q.pq, job) - return job.CompactionJob - } - return nil -} - -func (q *jobQueue) update(name string, now int64, raftLogIndex uint64) bool { - q.mu.Lock() - defer q.mu.Unlock() - if job, exists := q.jobs[name]; exists { - if job.RaftLogIndex > raftLogIndex { - return false - } - job.LeaseExpiresAt = q.getNewDeadline(now) - job.Status = compactionpb.CompactionStatus_COMPACTION_STATUS_IN_PROGRESS - // De-prioritize the job, as the deadline has been postponed. 
- heap.Fix(&q.pq, job.index) - return true - } - return false -} - -func (q *jobQueue) cancel(name string) { - q.mu.Lock() - defer q.mu.Unlock() - if job, exists := q.jobs[name]; exists { - job.Status = compactionpb.CompactionStatus_COMPACTION_STATUS_CANCELLED - heap.Fix(&q.pq, job.index) - } -} - -func (q *jobQueue) getNewDeadline(now int64) int64 { - return now + q.lease -} - -func (q *jobQueue) isOwner(name string, raftLogIndex uint64) bool { - q.mu.Lock() - defer q.mu.Unlock() - if job, exists := q.jobs[name]; exists { - if job.RaftLogIndex > raftLogIndex { - return false - } - } - return true -} - -func (q *jobQueue) evict(name string, raftLogIndex uint64) bool { - q.mu.Lock() - defer q.mu.Unlock() - if job, exists := q.jobs[name]; exists { - if job.RaftLogIndex > raftLogIndex { - return false - } - delete(q.jobs, name) - heap.Remove(&q.pq, job.index) - } - return true -} - -func (q *jobQueue) enqueue(job *compactionpb.CompactionJob) bool { - q.mu.Lock() - defer q.mu.Unlock() - if _, exists := q.jobs[job.Name]; exists { - return false - } - j := &jobQueueEntry{CompactionJob: job} - q.jobs[job.Name] = j - heap.Push(&q.pq, j) - return true -} - -func (q *jobQueue) putJob(job *compactionpb.CompactionJob) { - q.jobs[job.Name] = &jobQueueEntry{CompactionJob: job} -} - -func (q *jobQueue) rebuild() { - q.pq = slices.Grow(q.pq[0:], len(q.jobs)) - for _, job := range q.jobs { - q.pq = append(q.pq, job) - } - heap.Init(&q.pq) -} - -func (q *jobQueue) stats() (int, []string, []string, []string, []string, []string) { - q.mu.Lock() - defer q.mu.Unlock() - - newJobs := make([]string, 0) - inProgressJobs := make([]string, 0) - completedJobs := make([]string, 0) - failedJobs := make([]string, 0) - cancelledJobs := make([]string, 0) - for _, job := range q.jobs { - switch job.Status { - case compactionpb.CompactionStatus_COMPACTION_STATUS_UNSPECIFIED: - newJobs = append(newJobs, job.Name) - case compactionpb.CompactionStatus_COMPACTION_STATUS_IN_PROGRESS: - inProgressJobs = append(inProgressJobs, job.Name) - case compactionpb.CompactionStatus_COMPACTION_STATUS_SUCCESS: - completedJobs = append(completedJobs, job.Name) - case compactionpb.CompactionStatus_COMPACTION_STATUS_FAILURE: - failedJobs = append(failedJobs, job.Name) - case compactionpb.CompactionStatus_COMPACTION_STATUS_CANCELLED: - cancelledJobs = append(cancelledJobs, job.Name) - } - } - return len(q.jobs), newJobs, inProgressJobs, completedJobs, failedJobs, cancelledJobs -} - -// TODO(kolesnikovae): container/heap is not very efficient, -// consider implementing own heap, specific to the case. 
- -type priorityQueue []*jobQueueEntry - -func (pq priorityQueue) Len() int { return len(pq) } - -func (pq priorityQueue) Less(i, j int) bool { return pq[i].less(pq[j]) } - -func (pq priorityQueue) Swap(i, j int) { - pq[i], pq[j] = pq[j], pq[i] - pq[i].index = i - pq[j].index = j -} - -func (pq *priorityQueue) Push(x interface{}) { - n := len(*pq) - job := x.(*jobQueueEntry) - job.index = n - *pq = append(*pq, job) -} - -func (pq *priorityQueue) Pop() interface{} { - old := *pq - n := len(old) - job := old[n-1] - old[n-1] = nil - job.index = -1 - *pq = old[0 : n-1] - return job -} diff --git a/pkg/experiment/metastore/compaction_queue_test.go b/pkg/experiment/metastore/compaction_queue_test.go deleted file mode 100644 index 09f1af46dd..0000000000 --- a/pkg/experiment/metastore/compaction_queue_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package metastore - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/grafana/pyroscope/pkg/experiment/metastore/compactionpb" -) - -func Test_compactionJobQueue(t *testing.T) { - var now int64 // Timestamp of the raft command. - lease := int64(10) // Job lease duration. - q := newJobQueue(lease) - - assert.True(t, q.enqueue(&compactionpb.CompactionJob{ - Name: "job1", - RaftLogIndex: 1, - CompactionLevel: 0, - })) - assert.True(t, q.enqueue(&compactionpb.CompactionJob{ - Name: "job2", - RaftLogIndex: 2, - CompactionLevel: 1, - })) - assert.True(t, q.enqueue(&compactionpb.CompactionJob{ - Name: "job3", - RaftLogIndex: 3, - CompactionLevel: 0, - })) - - // Token here is the raft command index. - assertJob(t, q.dequeue(now, 4), "job1", 4) // L0 - assertJob(t, q.dequeue(now, 5), "job3", 5) // L0 - assertJob(t, q.dequeue(now, 6), "job2", 6) // L1 - require.Nil(t, q.dequeue(now, 7)) // No jobs left. - require.Nil(t, q.dequeue(now, 8)) // No jobs left. - - // Time has passed. Updating the jobs: all but job1. - now += lease / 2 - assert.True(t, q.update("job3", now, 9)) // Postpone the deadline. - assert.True(t, q.update("job2", now, 10)) // Postpone the deadline. - require.Nil(t, q.dequeue(now, 11)) // No jobs left. - - // Time has passed: the initial lease has expired. - now += lease/2 + 1 - assertJob(t, q.dequeue(now, 12), "job1", 12) // Seizing ownership of expired job. - require.Nil(t, q.dequeue(now, 13)) // No jobs available yet. - - // Owner of the job1 awakes and tries to update the job. - assert.False(t, q.update("job1", now, 4)) // Postpone the deadline; stale owner is rejected. - assert.True(t, q.update("job1", now, 12)) // Postpone the deadline; new owner succeeds. - - assert.False(t, q.evict("job1", 4)) // Evicting the job; stale owner is rejected. - assert.True(t, q.evict("job1", 12)) // Postpone the deadline; new owner succeeds. - - // Jobs are evicted in the end, regardless of the status. - // We ignore expired lease, as long as nobody else has taken the job. - assert.True(t, q.evict("job2", 10)) - assert.True(t, q.evict("job3", 9)) - - // No jobs left. 
- require.Nil(t, q.dequeue(now, 14)) -} - -func assertJob(t *testing.T, j *compactionpb.CompactionJob, name string, commitIndex uint64) { - require.NotNil(t, j) - assert.Equal(t, name, j.Name) - assert.Equal(t, commitIndex, j.RaftLogIndex) -} diff --git a/pkg/experiment/metastore/compaction_raft_handler.go b/pkg/experiment/metastore/compaction_raft_handler.go index 40a9708901..927206f9e9 100644 --- a/pkg/experiment/metastore/compaction_raft_handler.go +++ b/pkg/experiment/metastore/compaction_raft_handler.go @@ -1,706 +1,194 @@ package metastore import ( - "encoding/binary" - "fmt" - "math" - "strings" - "sync" - "time" - - "github.com/cespare/xxhash/v2" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/hashicorp/raft" - "github.com/prometheus/client_golang/prometheus" "go.etcd.io/bbolt" metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" - "github.com/grafana/pyroscope/pkg/experiment/metastore/compactionpb" - "github.com/grafana/pyroscope/pkg/experiment/metastore/index" - "github.com/grafana/pyroscope/pkg/experiment/metastore/storeutils" + "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log" + "github.com/grafana/pyroscope/pkg/experiment/metastore/compaction" ) -type CompactorIndex interface { - FindBlock(tx *bbolt.Tx, shard uint32, tenant string, block string) *metastorev1.BlockMeta - FindPartitionMetas(block string) []*index.PartitionMeta - CreatePartitionKey(string) index.PartitionKey - ReplaceBlocks(tx *bbolt.Tx, compacted []string, shard uint32, tenant string, blocks []*metastorev1.BlockMeta) +type IndexReplacer interface { + ReplaceBlocks(*bbolt.Tx, *metastorev1.CompactedBlocks) error } -type DeletionMarker interface { - Mark(tx *bbolt.Tx, shard uint32, tenant string, block string, now int64) error +type TombstoneDeleter interface { + DeleteTombstones(*bbolt.Tx, *raft.Log, ...*metastorev1.Tombstones) error + AddTombstones(*bbolt.Tx, *raft.Log, *metastorev1.Tombstones) error } type CompactionCommandHandler struct { - logger log.Logger - config CompactionConfig - index CompactorIndex - marker DeletionMarker - metrics *compactionMetrics - - compactionJobBlockQueues map[tenantShard]*compactionJobBlockQueue - compactionJobQueue *jobQueue -} - -type tenantShard struct { - tenant string - shard uint32 -} - -type compactionJobBlockQueue struct { - mu sync.Mutex - blocksByLevel map[uint32][]string -} - -type pollStateUpdate struct { - newBlocks map[tenantShard][]*metastorev1.BlockMeta - deletedBlocks map[tenantShard][]string - updatedBlockQueues map[tenantShard][]uint32 - deletedJobs map[tenantShard][]string - newJobs []string - updatedJobs []string + logger log.Logger + index IndexReplacer + compactor compaction.Compactor + planner compaction.Planner + scheduler compaction.Scheduler + tombstones TombstoneDeleter } func NewCompactionCommandHandler( logger log.Logger, - config CompactionConfig, - index CompactorIndex, - marker DeletionMarker, - reg prometheus.Registerer, + index IndexReplacer, + compactor compaction.Compactor, + planner compaction.Planner, + scheduler compaction.Scheduler, + tombstones TombstoneDeleter, ) *CompactionCommandHandler { return &CompactionCommandHandler{ - logger: logger, - index: index, - marker: marker, - compactionJobBlockQueues: make(map[tenantShard]*compactionJobBlockQueue), - compactionJobQueue: newJobQueue(config.JobLeaseDuration.Nanoseconds()), - metrics: newCompactionMetrics(reg), - config: config, - } -} - -func (h *CompactionCommandHandler) PollCompactionJobs(tx *bbolt.Tx, cmd *raft.Log, request 
*metastorev1.PollCompactionJobsRequest) (resp *metastorev1.PollCompactionJobsResponse, err error) { - level.Debug(h.logger).Log( - "msg", "applying poll compaction jobs", - "num_updates", len(request.JobStatusUpdates), - "job_capacity", request.JobCapacity, - "raft_log_index", cmd.Index) - - stateUpdate := &pollStateUpdate{ - newBlocks: make(map[tenantShard][]*metastorev1.BlockMeta), - deletedBlocks: make(map[tenantShard][]string), - newJobs: make([]string, 0), - updatedBlockQueues: make(map[tenantShard][]uint32), - deletedJobs: make(map[tenantShard][]string), - updatedJobs: make([]string, 0), - } - - for _, jobUpdate := range request.JobStatusUpdates { - job := h.findJob(jobUpdate.JobName) - if job == nil { - level.Error(h.logger).Log("msg", "error processing update for compaction job, job not found", "job", jobUpdate.JobName, "err", err) - continue - } - if !h.compactionJobQueue.isOwner(job.Name, jobUpdate.RaftLogIndex) { - level.Warn(h.logger).Log("msg", "job is not assigned to the worker", "job", jobUpdate.JobName, "raft_log_index", jobUpdate.RaftLogIndex) - continue - } - level.Debug(h.logger).Log("msg", "processing status update for compaction job", "job", jobUpdate.JobName, "status", jobUpdate.Status) - switch jobUpdate.Status { - case metastorev1.CompactionStatus_COMPACTION_STATUS_SUCCESS: - // clean up the job, we don't keep completed jobs around - h.compactionJobQueue.evict(job.Name, job.RaftLogIndex) - jobKey := tenantShard{tenant: job.TenantId, shard: job.Shard} - stateUpdate.deletedJobs[jobKey] = append(stateUpdate.deletedJobs[jobKey], job.Name) - h.metrics.completedJobs.WithLabelValues( - fmt.Sprint(job.Shard), job.TenantId, fmt.Sprint(job.CompactionLevel)).Inc() - - // next we'll replace source blocks with compacted ones - h.index.ReplaceBlocks(tx, job.Blocks, job.Shard, job.TenantId, jobUpdate.CompletedJob.Blocks) - for _, b := range jobUpdate.CompletedJob.Blocks { - level.Debug(h.logger).Log( - "msg", "added compacted block", - "block", b.Id, - "shard", b.Shard, - "tenant", b.TenantId, - "level", b.CompactionLevel, - "source_job", job.Name) - blockTenantShard := tenantShard{tenant: b.TenantId, shard: b.Shard} - stateUpdate.newBlocks[blockTenantShard] = append(stateUpdate.newBlocks[blockTenantShard], b) - - // adding new blocks to the compaction queue - if jobForNewBlock := h.tryCreateJob(b, jobUpdate.RaftLogIndex); jobForNewBlock != nil { - h.addCompactionJob(jobForNewBlock) - stateUpdate.newJobs = append(stateUpdate.newJobs, jobForNewBlock.Name) - h.metrics.addedJobs.WithLabelValues( - fmt.Sprint(jobForNewBlock.Shard), jobForNewBlock.TenantId, fmt.Sprint(jobForNewBlock.CompactionLevel)).Inc() - } else { - h.addBlockToCompactionJobQueue(b) - } - h.metrics.addedBlocks.WithLabelValues( - fmt.Sprint(b.Shard), b.TenantId, fmt.Sprint(b.CompactionLevel)).Inc() - - stateUpdate.updatedBlockQueues[blockTenantShard] = append(stateUpdate.updatedBlockQueues[blockTenantShard], b.CompactionLevel) - } - for _, b := range job.Blocks { - level.Debug(h.logger).Log( - "msg", "deleted source block", - "block", b, - "shard", job.Shard, - "tenant", job.TenantId, - "level", job.CompactionLevel, - "job", job.Name, - ) - h.metrics.deletedBlocks.WithLabelValues( - fmt.Sprint(job.Shard), job.TenantId, fmt.Sprint(job.CompactionLevel)).Inc() - stateUpdate.deletedBlocks[jobKey] = append(stateUpdate.deletedBlocks[jobKey], b) - } - case metastorev1.CompactionStatus_COMPACTION_STATUS_IN_PROGRESS: - level.Debug(h.logger).Log( - "msg", "compaction job still in progress", - "job", job.Name, - "tenant", 
job.TenantId, - "shard", job.Shard, - "level", job.CompactionLevel, - ) - h.compactionJobQueue.update(jobUpdate.JobName, cmd.AppendedAt.UnixNano(), jobUpdate.RaftLogIndex) - stateUpdate.updatedJobs = append(stateUpdate.updatedJobs, job.Name) - case metastorev1.CompactionStatus_COMPACTION_STATUS_FAILURE: - job.Failures += 1 - level.Warn(h.logger).Log( - "msg", "compaction job failed", - "job", job.Name, - "tenant", job.TenantId, - "shard", job.Shard, - "level", job.CompactionLevel, - "failures", job.Failures, - ) - if int(job.Failures) >= h.config.JobMaxFailures { - level.Warn(h.logger).Log( - "msg", "compaction job reached max failures", - "job", job.Name, - "tenant", job.TenantId, - "shard", job.Shard, - "level", job.CompactionLevel, - "failures", job.Failures, - ) - h.compactionJobQueue.cancel(job.Name) - stateUpdate.updatedJobs = append(stateUpdate.updatedJobs, job.Name) - h.metrics.discardedJobs.WithLabelValues( - fmt.Sprint(job.Shard), job.TenantId, fmt.Sprint(job.CompactionLevel)).Inc() - } else { - level.Warn(h.logger).Log( - "msg", "adding failed compaction job back to the queue", - "job", job.Name, - "tenant", job.TenantId, - "shard", job.Shard, - "level", job.CompactionLevel, - "failures", job.Failures, - ) - h.compactionJobQueue.evict(job.Name, math.MaxInt64) - job.Status = compactionpb.CompactionStatus_COMPACTION_STATUS_UNSPECIFIED - job.RaftLogIndex = 0 - job.LeaseExpiresAt = 0 - h.compactionJobQueue.enqueue(job) - stateUpdate.updatedJobs = append(stateUpdate.updatedJobs, job.Name) - h.metrics.retriedJobs.WithLabelValues( - fmt.Sprint(job.Shard), job.TenantId, fmt.Sprint(job.CompactionLevel)).Inc() - } - } - } - - resp = &metastorev1.PollCompactionJobsResponse{} - if request.JobCapacity > 0 { - newJobs := h.findJobsToAssign(int(request.JobCapacity), cmd.Index, cmd.AppendedAt.UnixNano()) - convertedJobs, invalidJobs := h.convertJobs(tx, newJobs) - resp.CompactionJobs = convertedJobs - for _, j := range convertedJobs { - stateUpdate.updatedJobs = append(stateUpdate.updatedJobs, j.Name) - h.metrics.assignedJobs.WithLabelValues( - fmt.Sprint(j.Shard), j.TenantId, fmt.Sprint(j.CompactionLevel)).Inc() - } - for _, j := range invalidJobs { - key := tenantShard{ - tenant: j.TenantId, - shard: j.Shard, - } - h.compactionJobQueue.evict(j.Name, math.MaxInt64) - stateUpdate.deletedJobs[key] = append(stateUpdate.deletedJobs[key], j.Name) - } - } - - err = h.writeToDb(tx, stateUpdate) - if err != nil { - return nil, err - } - - for key, blocks := range stateUpdate.deletedBlocks { - for _, block := range blocks { - err = h.marker.Mark(tx, key.shard, key.tenant, block, cmd.AppendedAt.UnixNano()/time.Millisecond.Nanoseconds()) - if err != nil { - return nil, err - } - } - } - - return resp, nil -} - -func (h *CompactionCommandHandler) convertJobs(tx *bbolt.Tx, jobs []*compactionpb.CompactionJob) (convertedJobs []*metastorev1.CompactionJob, invalidJobs []*compactionpb.CompactionJob) { - convertedJobs = make([]*metastorev1.CompactionJob, 0, len(jobs)) - invalidJobs = make([]*compactionpb.CompactionJob, 0, len(jobs)) - for _, job := range jobs { - // populate block metadata (workers rely on it) - blocks := make([]*metastorev1.BlockMeta, 0, len(job.Blocks)) - for _, bId := range job.Blocks { - b := h.index.FindBlock(tx, job.Shard, job.TenantId, bId) - if b == nil { - level.Error(h.logger).Log( - "msg", "failed to populate compaction job details, block not found", - "block", bId, - "shard", job.Shard, - "job", job.Name) - continue - } - blocks = append(blocks, b) - } - if len(blocks) == 0 { - 
invalidJobs = append(invalidJobs, job) - level.Warn(h.logger).Log("msg", "skipping assigned compaction job since it has no valid blocks", "job", job.Name) - continue - } - - convertedJobs = append(convertedJobs, &metastorev1.CompactionJob{ - Name: job.Name, - Blocks: blocks, - Status: &metastorev1.CompactionJobStatus{ - JobName: job.Name, - Status: metastorev1.CompactionStatus(job.Status), - RaftLogIndex: job.RaftLogIndex, - Shard: job.Shard, - TenantId: job.TenantId, - }, - CompactionLevel: job.CompactionLevel, - RaftLogIndex: job.RaftLogIndex, - Shard: job.Shard, - TenantId: job.TenantId, - }) - } - return convertedJobs, invalidJobs -} - -func (h *CompactionCommandHandler) findJobsToAssign(jobCapacity int, raftLogIndex uint64, now int64) []*compactionpb.CompactionJob { - jobsToAssign := make([]*compactionpb.CompactionJob, 0, jobCapacity) - jobCount, newJobs, inProgressJobs, completedJobs, failedJobs, cancelledJobs := h.compactionJobQueue.stats() - level.Debug(h.logger).Log( - "msg", "looking for jobs to assign", - "job_capacity", jobCapacity, - "raft_log_index", raftLogIndex, - "job_queue_size", jobCount, - "new_jobs_in_queue_count", len(newJobs), - "in_progress_jobs_in_queue_count", len(inProgressJobs), - "completed_jobs_in_queue_count", len(completedJobs), - "failed_jobs_in_queue_count", len(failedJobs), - "cancelled_jobs_in_queue_count", len(cancelledJobs), - ) - - var j *compactionpb.CompactionJob - for len(jobsToAssign) < jobCapacity { - j = h.compactionJobQueue.dequeue(now, raftLogIndex) - if j == nil { - break - } - level.Debug(h.logger).Log("msg", "assigning job to raftLogIndex", "job", j, "raft_log_index", raftLogIndex) - jobsToAssign = append(jobsToAssign, j) - } - - return jobsToAssign -} - -func (h *CompactionCommandHandler) writeToDb(tx *bbolt.Tx, sTable *pollStateUpdate) error { - for _, blocks := range sTable.newBlocks { - for _, block := range blocks { - err := persistBlock(tx, h.index.CreatePartitionKey(block.Id), block) - if err != nil { - return err - } - } - } - for key, blocks := range sTable.deletedBlocks { - for _, block := range blocks { - err := h.deleteBlock(tx, key.shard, key.tenant, block) - if err != nil { - return err - } - } - } - for _, jobName := range sTable.newJobs { - job := h.findJob(jobName) - if job == nil { - level.Error(h.logger).Log( - "msg", "a newly added job could not be found", - "job", jobName, - ) - continue - } - err := h.persistCompactionJob(job.Shard, job.TenantId, job, tx) + logger: logger, + index: index, + compactor: compactor, + planner: planner, + scheduler: scheduler, + tombstones: tombstones, + } +} + +func (h *CompactionCommandHandler) GetCompactionPlanUpdate( + tx *bbolt.Tx, cmd *raft.Log, req *raft_log.GetCompactionPlanUpdateRequest, +) (*raft_log.GetCompactionPlanUpdateResponse, error) { + // We need to generate a plan of the update caused by the new status + // report from the worker. The plan will be used to update the schedule + // after the Raft consensus is reached. + planner := h.planner.NewPlan(tx, cmd) + scheduler := h.scheduler.NewSchedule(tx, cmd) + p := new(raft_log.CompactionPlanUpdate) + + // Any status update may translate to either a job lease refresh, or a + // completed job. Status update might be rejected, if the worker has + // lost the job. We treat revoked jobs as vacant slots for new + // assignments, therefore we try to update jobs' status first. 
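+	// revoked counts status updates rejected by the scheduler; each one
+	// frees a worker slot that can be reused for a new assignment below.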
+ var revoked int + for _, status := range req.StatusUpdates { + switch state := scheduler.UpdateJob(status); { + case state == nil: + // Nil state indicates that the job has been abandoned and + // reassigned, or the request is not valid. This may happen + // from time to time, and we should just ignore such requests. + revoked++ + + case state.Status == metastorev1.CompactionJobStatus_COMPACTION_STATUS_SUCCESS: + p.CompletedJobs = append(p.CompletedJobs, &raft_log.CompletedCompactionJob{State: state}) + + case state.Status == metastorev1.CompactionJobStatus_COMPACTION_STATUS_IN_PROGRESS: + p.UpdatedJobs = append(p.UpdatedJobs, &raft_log.UpdatedCompactionJob{State: state}) + + default: + // Unknown statuses are ignored. From the worker perspective, + // the job is re-assigned. + } + } + + // AssignJobsMax tells us how many free slots the worker has. We need to + // account for the revoked jobs, as they are freeing the worker slots. + capacity := int(req.AssignJobsMax) + revoked + + // Next, we need to create new jobs and assign existing + // + // NOTE(kolesnikovae): On one hand, if we assign first, we may violate the + // SJF principle. If we plan new jobs first, it may cause starvation of + // lower-priority jobs, when the compaction worker does not keep up with + // the high-priority job influx. As of now, we assign jobs before creating + // ones. If we change it, we need to make sure that the Schedule + // implementation allows doing this. + for assigned := 0; assigned < capacity; assigned++ { + job, err := scheduler.AssignJob() if err != nil { - return err - } - } - for key, levels := range sTable.updatedBlockQueues { - for _, l := range levels { - queue := h.getOrCreateCompactionBlockQueue(key).blocksByLevel[l] - if queue == nil { - level.Error(h.logger).Log( - "msg", "block queue not found", - "shard", key.shard, - "tenant", key.tenant, - "level", l, - ) - continue - } - err := h.persistCompactionJobBlockQueue(key.shard, key.tenant, l, queue, tx) - if err != nil { - return err - } + level.Error(h.logger).Log("msg", "failed to assign compaction job", "err", err) + return nil, err } - } - for key, jobNames := range sTable.deletedJobs { - for _, jobName := range jobNames { - jBucket, jKey := keyForCompactionJob(key.shard, key.tenant, jobName) - err := updateCompactionJobBucket(tx, jBucket, func(bucket *bbolt.Bucket) error { - level.Debug(h.logger).Log( - "msg", "deleting job from storage", - "job", jobName, - "shard", key.shard, - "tenant", key.tenant, - "storage_bucket", string(jBucket), - "storage_key", string(jKey)) - return bucket.Delete(jKey) - }) - if err != nil { - return err - } - } - } - for _, jobName := range sTable.updatedJobs { - job := h.findJob(jobName) if job == nil { - level.Error(h.logger).Log( - "msg", "an updated job could not be found", - "job", jobName, - ) - continue - } - err := h.persistCompactionJob(job.Shard, job.TenantId, job, tx) - if err != nil { - return err + // No more jobs to assign. 
+ break } + p.AssignedJobs = append(p.AssignedJobs, job) } - return nil -} -func (h *CompactionCommandHandler) deleteBlock(tx *bbolt.Tx, shardId uint32, tenant, blockId string) error { - for _, meta := range h.index.FindPartitionMetas(blockId) { - err := index.UpdateBlockMetadataBucket(tx, meta.Key, shardId, tenant, func(bucket *bbolt.Bucket) error { - return bucket.Delete([]byte(blockId)) - }) + for created := 0; created < capacity; created++ { + plan, err := planner.CreateJob() if err != nil { - return err - } - } - return nil -} - -// CompactBlock is the entry point for adding blocks to the compaction flow. -// -// We add the block to a queue identified by the block shard, tenant and compaction level. -// -// If the job creation condition is met (based on the compaction strategy) after adding the block to the queue, -// we create a job and clear the queue. -// -// The method persists the optional job and the queue modification to both the memory state and the db. -func (h *CompactionCommandHandler) CompactBlock(tx *bbolt.Tx, cmd *raft.Log, block *metastorev1.BlockMeta) error { - // create and store an optional compaction job - if job := h.tryCreateJob(block, cmd.Index); job != nil { - if err := h.persistCompactionJob(block.Shard, block.TenantId, job, tx); err != nil { - return err - } - if err := h.persistCompactionJobBlockQueue(block.Shard, block.TenantId, block.CompactionLevel, []string{}, tx); err != nil { - return err - } - h.addCompactionJob(job) - h.metrics.addedJobs.WithLabelValues( - fmt.Sprint(job.Shard), job.TenantId, fmt.Sprint(job.CompactionLevel)).Inc() - } else { - key := tenantShard{ - tenant: block.TenantId, - shard: block.Shard, - } - queue := h.getOrCreateCompactionBlockQueue(key).blocksByLevel[block.CompactionLevel] - queue = append(queue, block.Id) - if err := h.persistCompactionJobBlockQueue(block.Shard, block.TenantId, block.CompactionLevel, queue, tx); err != nil { - return err + level.Error(h.logger).Log("msg", "failed to create compaction job", "err", err) + return nil, err } - h.addBlockToCompactionJobQueue(block) - } - h.metrics.addedBlocks.WithLabelValues( - fmt.Sprint(block.Shard), block.TenantId, fmt.Sprint(block.CompactionLevel)).Inc() - return nil -} - -func (h *CompactionCommandHandler) tryCreateJob(block *metastorev1.BlockMeta, raftLogIndex uint64) *compactionpb.CompactionJob { - key := tenantShard{ - tenant: block.TenantId, - shard: block.Shard, - } - blockQueue := h.getOrCreateCompactionBlockQueue(key) - blockQueue.mu.Lock() - defer blockQueue.mu.Unlock() - - if block.CompactionLevel >= globalCompactionStrategy.maxCompactionLevel { - level.Info(h.logger).Log("msg", "skipping block at max compaction level", "block", block.Id, "compaction_level", block.CompactionLevel) - return nil - } - - queuedBlocks := append(blockQueue.blocksByLevel[block.CompactionLevel], block.Id) - - level.Debug(h.logger).Log( - "msg", "adding block for compaction", - "block", block.Id, - "shard", block.Shard, - "tenant", block.TenantId, - "compaction_level", block.CompactionLevel, - "size", block.Size, - "queue_size", len(queuedBlocks), - "raft_log_index", raftLogIndex) - - strategy := getStrategyForLevel(block.CompactionLevel) - - var job *compactionpb.CompactionJob - if strategy.shouldCreateJob(queuedBlocks) { - blockIds := make([]string, 0, len(queuedBlocks)) - for _, b := range queuedBlocks { - blockIds = append(blockIds, b) - } - job = &compactionpb.CompactionJob{ - Name: fmt.Sprintf("L%d-S%d-%d", block.CompactionLevel, block.Shard, calculateHash(queuedBlocks)), - Blocks: 
blockIds, - Status: compactionpb.CompactionStatus_COMPACTION_STATUS_UNSPECIFIED, - Shard: block.Shard, - TenantId: block.TenantId, - CompactionLevel: block.CompactionLevel, + if plan == nil { + // No more jobs to create. + break } - level.Info(h.logger).Log( - "msg", "created compaction job", - "job", job.Name, - "blocks", strings.Join(queuedBlocks, ","), - "shard", block.Shard, - "tenant", block.TenantId, - "compaction_level", block.CompactionLevel) - } - return job -} - -func (h *CompactionCommandHandler) addCompactionJob(job *compactionpb.CompactionJob) { - level.Debug(h.logger).Log( - "msg", "adding compaction job to priority queue", - "job", job.Name, - "tenant", job.TenantId, - "shard", job.Shard, - "compaction_level", job.CompactionLevel, - ) - if ok := h.compactionJobQueue.enqueue(job); !ok { - level.Warn(h.logger).Log("msg", "a compaction job with this name already exists", "job", job.Name) - return + p.NewJobs = append(p.NewJobs, &raft_log.NewCompactionJob{ + State: scheduler.AddJob(plan), + Plan: plan, + }) } - // reset the pre-queue for this level - key := tenantShard{ - tenant: job.TenantId, - shard: job.Shard, - } - blockQueue := h.getOrCreateCompactionBlockQueue(key) - blockQueue.mu.Lock() - defer blockQueue.mu.Unlock() - blockQueue.blocksByLevel[job.CompactionLevel] = blockQueue.blocksByLevel[job.CompactionLevel][:0] + return &raft_log.GetCompactionPlanUpdateResponse{Term: cmd.Term, PlanUpdate: p}, nil } -func (h *CompactionCommandHandler) addBlockToCompactionJobQueue(block *metastorev1.BlockMeta) { - key := tenantShard{ - tenant: block.TenantId, - shard: block.Shard, +func (h *CompactionCommandHandler) UpdateCompactionPlan( + tx *bbolt.Tx, cmd *raft.Log, req *raft_log.UpdateCompactionPlanRequest, +) (*raft_log.UpdateCompactionPlanResponse, error) { + if req.Term != cmd.Term || req.GetPlanUpdate() == nil { + level.Warn(h.logger).Log( + "msg", "rejecting compaction plan update", + "current_term", cmd.Term, + "request_term", req.Term, + ) + return new(raft_log.UpdateCompactionPlanResponse), nil } - blockQueue := h.getOrCreateCompactionBlockQueue(key) - blockQueue.mu.Lock() - defer blockQueue.mu.Unlock() - level.Debug(h.logger).Log( - "msg", "adding block to compaction job block queue", - "block", block.Id, - "level", block.CompactionLevel, - "shard", block.Shard, - "tenant", block.TenantId) - blockQueue.blocksByLevel[block.CompactionLevel] = append(blockQueue.blocksByLevel[block.CompactionLevel], block.Id) -} - -func calculateHash(blocks []string) uint64 { - b := make([]byte, 0, 1024) - for _, blk := range blocks { - b = append(b, blk...) 
+ if err := h.planner.UpdatePlan(tx, cmd, req.PlanUpdate); err != nil { + level.Error(h.logger).Log("msg", "failed to update compaction planner", "err", err) + return nil, err } - return xxhash.Sum64(b) -} -func (h *CompactionCommandHandler) persistCompactionJob(shard uint32, tenant string, job *compactionpb.CompactionJob, tx *bbolt.Tx) error { - jobBucketName, jobKey := keyForCompactionJob(shard, tenant, job.Name) - if err := updateCompactionJobBucket(tx, jobBucketName, func(bucket *bbolt.Bucket) error { - data, _ := job.MarshalVT() - level.Debug(h.logger).Log("msg", "persisting compaction job", "job", job.Name, "storage_bucket", jobBucketName, "storage_key", jobKey) - return bucket.Put(jobKey, data) - }); err != nil { - return err + if err := h.scheduler.UpdateSchedule(tx, cmd, req.PlanUpdate); err != nil { + level.Error(h.logger).Log("msg", "failed to update compaction schedule", "err", err) + return nil, err } - return nil -} -func (h *CompactionCommandHandler) persistCompactionJobBlockQueue(shard uint32, tenant string, compactionLevel uint32, queue []string, tx *bbolt.Tx) error { - jobBucketName, _ := keyForCompactionJob(shard, tenant, "") - blockQueue := &compactionpb.CompactionJobBlockQueue{ - CompactionLevel: compactionLevel, - Shard: shard, - Tenant: tenant, - Blocks: queue, + for _, job := range req.PlanUpdate.NewJobs { + if err := h.tombstones.DeleteTombstones(tx, cmd, job.Plan.Tombstones...); err != nil { + level.Error(h.logger).Log("msg", "failed to delete tombstones", "err", err) + return nil, err + } } - key := []byte(fmt.Sprintf("%s-%d", compactionBucketJobBlockQueuePrefix, compactionLevel)) - return updateCompactionJobBucket(tx, jobBucketName, func(bucket *bbolt.Bucket) error { - data, _ := blockQueue.MarshalVT() - return bucket.Put(key, data) - }) -} -func (h *CompactionCommandHandler) restoreCompactionPlan(tx *bbolt.Tx) error { - cdb := tx.Bucket(compactionJobBucketNameBytes) - return cdb.ForEachBucket(func(name []byte) error { - shard, tenant, ok := storeutils.ParseTenantShardBucketName(name) - if !ok { - _ = level.Error(h.logger).Log("msg", "malformed bucket name", "name", string(name)) - return nil + for _, job := range req.PlanUpdate.CompletedJobs { + compacted := job.GetCompactedBlocks() + if compacted == nil { + level.Error(h.logger).Log("msg", "compacted blocks are missing", "job", job.State.Name) + continue } - key := tenantShard{ - tenant: tenant, - shard: shard, + if err := h.index.ReplaceBlocks(tx, compacted); err != nil { + level.Error(h.logger).Log("msg", "failed to replace blocks", "err", err) + return nil, err } - blockQueue := h.getOrCreateCompactionBlockQueue(key) - - return h.loadCompactionPlan(cdb.Bucket(name), blockQueue) - }) - -} - -func (h *CompactionCommandHandler) getOrCreateCompactionBlockQueue(key tenantShard) *compactionJobBlockQueue { - if blockQueue, ok := h.compactionJobBlockQueues[key]; ok { - return blockQueue - } - plan := &compactionJobBlockQueue{ - blocksByLevel: make(map[uint32][]string), - } - h.compactionJobBlockQueues[key] = plan - return plan -} - -func (h *CompactionCommandHandler) findJob(name string) *compactionpb.CompactionJob { - h.compactionJobQueue.mu.Lock() - defer h.compactionJobQueue.mu.Unlock() - if jobEntry, exists := h.compactionJobQueue.jobs[name]; exists { - return jobEntry.CompactionJob - } - return nil -} - -func (h *CompactionCommandHandler) loadCompactionPlan(b *bbolt.Bucket, blockQueue *compactionJobBlockQueue) error { - blockQueue.mu.Lock() - defer blockQueue.mu.Unlock() - - c := b.Cursor() - for k, v := 
c.First(); k != nil; k, v = c.Next() { - if strings.HasPrefix(string(k), compactionBucketJobBlockQueuePrefix) { - var storedBlockQueue compactionpb.CompactionJobBlockQueue - if err := storedBlockQueue.UnmarshalVT(v); err != nil { - return fmt.Errorf("failed to load compaction job block queue %q: %w", string(k), err) - } - blockQueue.blocksByLevel[storedBlockQueue.CompactionLevel] = storedBlockQueue.Blocks - level.Debug(h.logger).Log( - "msg", "restored compaction job block queue", - "shard", storedBlockQueue.Shard, - "compaction_level", storedBlockQueue.CompactionLevel, - "block_count", len(storedBlockQueue.Blocks), - "blocks", strings.Join(storedBlockQueue.Blocks, ",")) - } else { - var job compactionpb.CompactionJob - if err := job.UnmarshalVT(v); err != nil { - return fmt.Errorf("failed to unmarshal job %q: %w", string(k), err) + if err := h.tombstones.AddTombstones(tx, cmd, blockTombstonesForCompletedJob(job)); err != nil { + level.Error(h.logger).Log("msg", "failed to add tombstones", "err", err) + return nil, err + } + for _, block := range compacted.NewBlocks { + if err := h.compactor.Compact(tx, cmd, block); err != nil { + level.Error(h.logger).Log("msg", "failed to compact block", "err", err) + return nil, err } - h.compactionJobQueue.enqueue(&job) - level.Debug(h.logger).Log( - "msg", "restored job into queue", - "job", job.Name, - "shard", job.Shard, - "tenant", job.TenantId, - "compaction_level", job.CompactionLevel, - "job_status", job.Status.String(), - "raft_log_index", job.RaftLogIndex, - "lease_expires_at", job.LeaseExpiresAt, - "block_count", len(job.Blocks), - "blocks", strings.Join(job.Blocks, ",")) } } - return nil -} - -const ( - compactionJobBucketName = "compaction_job" - compactionBucketJobBlockQueuePrefix = "compaction-job-block-queue" -) - -var compactionJobBucketNameBytes = []byte(compactionJobBucketName) -func updateCompactionJobBucket(tx *bbolt.Tx, name []byte, fn func(*bbolt.Bucket) error) error { - cdb, err := getCompactionJobBucket(tx) - if err != nil { - return err - } - bucket, err := storeutils.GetOrCreateSubBucket(cdb, name) - if err != nil { - return err - } - return fn(bucket) -} - -// Bucket |Key -// [4:shard]|[job_name] -func keyForCompactionJob(shard uint32, tenant string, jobName string) (bucket, key []byte) { - bucket = make([]byte, 4+len(tenant)) - binary.BigEndian.PutUint32(bucket, shard) - copy(bucket[4:], tenant) - return bucket, []byte(jobName) + return &raft_log.UpdateCompactionPlanResponse{PlanUpdate: req.PlanUpdate}, nil } -func getCompactionJobBucket(tx *bbolt.Tx) (*bbolt.Bucket, error) { - return tx.CreateBucketIfNotExists(compactionJobBucketNameBytes) -} - -func (h *CompactionCommandHandler) Init(tx *bbolt.Tx) error { - if _, err := tx.CreateBucketIfNotExists(compactionJobBucketNameBytes); err != nil { - return err +func blockTombstonesForCompletedJob(job *raft_log.CompletedCompactionJob) *metastorev1.Tombstones { + source := job.CompactedBlocks.SourceBlocks + return &metastorev1.Tombstones{ + Blocks: &metastorev1.BlockTombstones{ + Name: job.State.Name, + Shard: source.Shard, + Tenant: source.Tenant, + CompactionLevel: job.State.CompactionLevel, + Blocks: source.Blocks, + }, } - return nil -} - -func (h *CompactionCommandHandler) Restore(tx *bbolt.Tx) error { - clear(h.compactionJobBlockQueues) - h.compactionJobQueue = newJobQueue(h.config.JobLeaseDuration.Nanoseconds()) - return h.restoreCompactionPlan(tx) } diff --git a/pkg/experiment/metastore/compaction_service.go b/pkg/experiment/metastore/compaction_service.go index 
65a54396c3..df15ad4f9b 100644
--- a/pkg/experiment/metastore/compaction_service.go
+++ b/pkg/experiment/metastore/compaction_service.go
@@ -2,44 +2,155 @@ package metastore
 
 import (
 	"context"
+	"sync"
 
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
 
 	metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1"
 	"github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log"
 	"github.com/grafana/pyroscope/pkg/experiment/metastore/fsm"
 )
 
+type CompactionService struct {
+	metastorev1.CompactionServiceServer
+
+	logger log.Logger
+	mu     sync.Mutex
+	raft   Raft
+}
+
 func NewCompactionService(
 	logger log.Logger,
-	raftLog Raft,
+	raft Raft,
 ) *CompactionService {
 	return &CompactionService{
 		logger: logger,
-		raft:   raftLog,
+		raft:   raft,
 	}
 }
 
-type CompactionService struct {
-	metastorev1.CompactionServiceServer
-
-	logger log.Logger
-	raft   Raft
-}
-
 func (svc *CompactionService) PollCompactionJobs(
 	_ context.Context,
 	req *metastorev1.PollCompactionJobsRequest,
 ) (*metastorev1.PollCompactionJobsResponse, error) {
-	level.Debug(svc.logger).Log(
-		"msg", "received poll compaction jobs request",
-		"num_updates", len(req.JobStatusUpdates),
-		"job_capacity", req.JobCapacity)
-	resp, err := svc.raft.Propose(fsm.RaftLogEntryType(raft_log.RaftCommand_RAFT_COMMAND_POLL_COMPACTION_JOBS), req)
+	// This is a two-step process. To commit changes to the compaction plan,
+	// we need to ensure that all replicas apply exactly the same changes.
+	// Instead of relying on identical behavior across replicas and a
+	// reproducible compaction plan, we explicitly replicate the change.
+	//
+	// NOTE(kolesnikovae): We can use the Leader Read optimization here. However,
+	// we would need to ensure synchronization between the compactor and the
+	// index, and ensure isolation at the data level. For now, we're using
+	// the raft log to guarantee a serializable isolation level.
+	//
+	// Make sure that only one compaction plan update is in progress at a time.
+	// This lock does not introduce contention, as the raft log is synchronous.
+	svc.mu.Lock()
+	defer svc.mu.Unlock()
+
+	// First, we ask the current leader to prepare the change. This is a read
+	// operation conducted through the raft log: at this stage, we only
+	// prepare changes; the command handler does not alter the state.
+	request := &raft_log.GetCompactionPlanUpdateRequest{
+		StatusUpdates: make([]*raft_log.CompactionJobStatusUpdate, 0, len(req.StatusUpdates)),
+		AssignJobsMax: req.JobCapacity,
+	}
+
+	// We only send the status updates (without job results) to minimize the
+	// traffic, but we want to include the results of compaction in the final
+	// proposal. If the status update is accepted, we trust the worker and
+	// don't need to load our own copy of the job.
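+	// compacted keeps the updates that carry compaction results, keyed by
+	// job name, so the results can be attached to the final proposal below.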
+ compacted := make(map[string]*metastorev1.CompactionJobStatusUpdate, len(req.StatusUpdates)) + for _, update := range req.StatusUpdates { + if update.CompactedBlocks != nil { + compacted[update.Name] = update + } + request.StatusUpdates = append(request.StatusUpdates, &raft_log.CompactionJobStatusUpdate{ + Name: update.Name, + Token: update.Token, + Status: update.Status, + }) + } + + cmd := fsm.RaftLogEntryType(raft_log.RaftCommand_RAFT_COMMAND_GET_COMPACTION_PLAN_UPDATE) + resp, err := svc.raft.Propose(cmd, request) if err != nil { - _ = level.Error(svc.logger).Log("msg", "failed to apply poll compaction jobs", "err", err) + level.Error(svc.logger).Log("msg", "failed to prepare compaction plan", "err", err) return nil, err } - return resp.(*metastorev1.PollCompactionJobsResponse), nil + prepared := resp.(*raft_log.GetCompactionPlanUpdateResponse) + planUpdate := prepared.GetPlanUpdate() + + // Copy plan updates to the worker response. The job plan is only sent for + // newly assigned jobs. Lease renewals do not require the plan to be sent. + workerResp := &metastorev1.PollCompactionJobsResponse{ + CompactionJobs: make([]*metastorev1.CompactionJob, 0, len(planUpdate.AssignedJobs)), + Assignments: make([]*metastorev1.CompactionJobAssignment, 0, len(planUpdate.UpdatedJobs)), + } + for _, updated := range planUpdate.UpdatedJobs { + update := updated.State + workerResp.Assignments = append(workerResp.Assignments, &metastorev1.CompactionJobAssignment{ + Name: update.Name, + Token: update.Token, + LeaseExpiresAt: update.LeaseExpiresAt, + }) + } + for _, assigned := range planUpdate.AssignedJobs { + assignment := assigned.State + workerResp.Assignments = append(workerResp.Assignments, &metastorev1.CompactionJobAssignment{ + Name: assignment.Name, + Token: assignment.Token, + LeaseExpiresAt: assignment.LeaseExpiresAt, + }) + job := assigned.Plan + workerResp.CompactionJobs = append(workerResp.CompactionJobs, &metastorev1.CompactionJob{ + Name: job.Name, + Shard: job.Shard, + Tenant: job.Tenant, + CompactionLevel: job.CompactionLevel, + SourceBlocks: job.SourceBlocks, + Tombstones: job.Tombstones, + }) + // Assigned jobs are not written to the raft log (only the assignments): + // from our perspective (scheduler and planner) these are just job updates. + assigned.Plan = nil + } + + // Include the compacted blocks in the final proposal. + for _, job := range planUpdate.CompletedJobs { + if update := compacted[job.State.Name]; update != nil { + job.CompactedBlocks = update.CompactedBlocks + } + } + + // Now that we have the plan, we need to propagate it through the + // raft log to ensure it is applied consistently across all replicas, + // regardless of their individual state or view of the plan. + cmd = fsm.RaftLogEntryType(raft_log.RaftCommand_RAFT_COMMAND_UPDATE_COMPACTION_PLAN) + + // We also include the current term of the planning step so that later + // we can verify that the leader has not changed, and the plan is still + // up-to-date. Otherwise, e.g., in the ABA case, when the current node + // loses leadership and gains it back in-between these two steps, we + // cannot guarantee that the proposed plan is still valid and up-to-date. + // The raft handler cannot return an error here (because this is a valid + // scenario, and we don't want to stop the node/cluster). Instead, an + // empty response would indicate that the plan is rejected.
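[Editor's note] To make the term check described in the comment above concrete, here is a minimal sketch of the verification on the handler side, assuming the term is taken from the applied raft log entry. It is illustrative only: the actual check lives in the metastore's UpdateCompactionPlan raft handler, which is not part of this hunk, and the function below is not its real signature.

package sketch

import (
	"github.com/hashicorp/raft"

	"github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log"
)

// handleUpdateCompactionPlan illustrates the rejection semantics: if the
// leader changed between the prepare and update steps, the prepared plan may
// be stale, so the handler returns an empty response instead of an error.
// The service maps the empty response to codes.FailedPrecondition, and the
// worker simply retries on its next poll.
func handleUpdateCompactionPlan(cmd *raft.Log, req *raft_log.UpdateCompactionPlanRequest) *raft_log.UpdateCompactionPlanResponse {
	if req.Term != cmd.Term {
		// ABA case: leadership was lost and regained between the two steps;
		// the plan prepared under the old term is not trusted.
		return &raft_log.UpdateCompactionPlanResponse{}
	}
	// The term is unchanged: the proposed plan is applied as-is and echoed
	// back, which is why the worker response prepared earlier stays valid.
	return &raft_log.UpdateCompactionPlanResponse{PlanUpdate: req.PlanUpdate}
}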
+ proposal := &raft_log.UpdateCompactionPlanRequest{Term: prepared.Term, PlanUpdate: planUpdate} + if resp, err = svc.raft.Propose(cmd, proposal); err != nil { + level.Error(svc.logger).Log("msg", "failed to update compaction plan", "err", err) + return nil, err + } + accepted := resp.(*raft_log.UpdateCompactionPlanResponse).GetPlanUpdate() + if accepted == nil { + level.Warn(svc.logger).Log("msg", "compaction plan update rejected") + return nil, status.Error(codes.FailedPrecondition, "failed to update compaction plan") + } + + // As of now, accepted plan always matches the proposed one, + // so our prepared worker response is still valid. + return workerResp, nil } diff --git a/pkg/experiment/metastore/compactionpb/compaction.pb.go b/pkg/experiment/metastore/compactionpb/compaction.pb.go deleted file mode 100644 index fb5a1cd3f9..0000000000 --- a/pkg/experiment/metastore/compactionpb/compaction.pb.go +++ /dev/null @@ -1,430 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.34.2 -// protoc (unknown) -// source: experiment/metastore/compactionpb/compaction.proto - -package compactionpb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type CompactionStatus int32 - -const ( - CompactionStatus_COMPACTION_STATUS_UNSPECIFIED CompactionStatus = 0 - CompactionStatus_COMPACTION_STATUS_IN_PROGRESS CompactionStatus = 1 - CompactionStatus_COMPACTION_STATUS_SUCCESS CompactionStatus = 2 - CompactionStatus_COMPACTION_STATUS_FAILURE CompactionStatus = 3 - CompactionStatus_COMPACTION_STATUS_CANCELLED CompactionStatus = 4 -) - -// Enum value maps for CompactionStatus. -var ( - CompactionStatus_name = map[int32]string{ - 0: "COMPACTION_STATUS_UNSPECIFIED", - 1: "COMPACTION_STATUS_IN_PROGRESS", - 2: "COMPACTION_STATUS_SUCCESS", - 3: "COMPACTION_STATUS_FAILURE", - 4: "COMPACTION_STATUS_CANCELLED", - } - CompactionStatus_value = map[string]int32{ - "COMPACTION_STATUS_UNSPECIFIED": 0, - "COMPACTION_STATUS_IN_PROGRESS": 1, - "COMPACTION_STATUS_SUCCESS": 2, - "COMPACTION_STATUS_FAILURE": 3, - "COMPACTION_STATUS_CANCELLED": 4, - } -) - -func (x CompactionStatus) Enum() *CompactionStatus { - p := new(CompactionStatus) - *p = x - return p -} - -func (x CompactionStatus) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (CompactionStatus) Descriptor() protoreflect.EnumDescriptor { - return file_experiment_metastore_compactionpb_compaction_proto_enumTypes[0].Descriptor() -} - -func (CompactionStatus) Type() protoreflect.EnumType { - return &file_experiment_metastore_compactionpb_compaction_proto_enumTypes[0] -} - -func (x CompactionStatus) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use CompactionStatus.Descriptor instead. -func (CompactionStatus) EnumDescriptor() ([]byte, []int) { - return file_experiment_metastore_compactionpb_compaction_proto_rawDescGZIP(), []int{0} -} - -type CompactionJob struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Unique name of the job. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // List of the input blocks. - Blocks []string `protobuf:"bytes,2,rep,name=blocks,proto3" json:"blocks,omitempty"` - // Compaction level (all blocks are the same) - CompactionLevel uint32 `protobuf:"varint,3,opt,name=compaction_level,json=compactionLevel,proto3" json:"compaction_level,omitempty"` - // The index of the raft command that changed the status of the job. - // Used as a fencing token in conjunction with the lease_expires_at - // field to manage ownership of the compaction job. Any access to the - // job must be guarded by the check: current_index >= raft_log_index. - // If the check fails, the access should be denied. - // - // The index is updated every time the job is assigned to a worker. - RaftLogIndex uint64 `protobuf:"varint,4,opt,name=raft_log_index,json=raftLogIndex,proto3" json:"raft_log_index,omitempty"` - // Shard the blocks belong to. - Shard uint32 `protobuf:"varint,5,opt,name=shard,proto3" json:"shard,omitempty"` - // Optional, empty for compaction level 0. - TenantId string `protobuf:"bytes,6,opt,name=tenant_id,json=tenantId,proto3" json:"tenant_id,omitempty"` - Status CompactionStatus `protobuf:"varint,7,opt,name=status,proto3,enum=compaction.CompactionStatus" json:"status,omitempty"` - // The time the compaction job lease expires. If a lease is expired, the - // job is considered abandoned and can be picked up by another worker. - // The expiration check should be done by comparing the timestamp of - // the raft log entry (command that accesses the job) with the value of - // this field. - // - // The lease is extended every time the owner reports a status update. - LeaseExpiresAt int64 `protobuf:"varint,8,opt,name=lease_expires_at,json=leaseExpiresAt,proto3" json:"lease_expires_at,omitempty"` - // The number of failures when processing this job. Used for retries. - Failures uint32 `protobuf:"varint,9,opt,name=failures,proto3" json:"failures,omitempty"` - LastFailureReason string `protobuf:"bytes,10,opt,name=last_failure_reason,json=lastFailureReason,proto3" json:"last_failure_reason,omitempty"` -} - -func (x *CompactionJob) Reset() { - *x = CompactionJob{} - if protoimpl.UnsafeEnabled { - mi := &file_experiment_metastore_compactionpb_compaction_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CompactionJob) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CompactionJob) ProtoMessage() {} - -func (x *CompactionJob) ProtoReflect() protoreflect.Message { - mi := &file_experiment_metastore_compactionpb_compaction_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CompactionJob.ProtoReflect.Descriptor instead. 
-func (*CompactionJob) Descriptor() ([]byte, []int) { - return file_experiment_metastore_compactionpb_compaction_proto_rawDescGZIP(), []int{0} -} - -func (x *CompactionJob) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *CompactionJob) GetBlocks() []string { - if x != nil { - return x.Blocks - } - return nil -} - -func (x *CompactionJob) GetCompactionLevel() uint32 { - if x != nil { - return x.CompactionLevel - } - return 0 -} - -func (x *CompactionJob) GetRaftLogIndex() uint64 { - if x != nil { - return x.RaftLogIndex - } - return 0 -} - -func (x *CompactionJob) GetShard() uint32 { - if x != nil { - return x.Shard - } - return 0 -} - -func (x *CompactionJob) GetTenantId() string { - if x != nil { - return x.TenantId - } - return "" -} - -func (x *CompactionJob) GetStatus() CompactionStatus { - if x != nil { - return x.Status - } - return CompactionStatus_COMPACTION_STATUS_UNSPECIFIED -} - -func (x *CompactionJob) GetLeaseExpiresAt() int64 { - if x != nil { - return x.LeaseExpiresAt - } - return 0 -} - -func (x *CompactionJob) GetFailures() uint32 { - if x != nil { - return x.Failures - } - return 0 -} - -func (x *CompactionJob) GetLastFailureReason() string { - if x != nil { - return x.LastFailureReason - } - return "" -} - -type CompactionJobBlockQueue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - CompactionLevel uint32 `protobuf:"varint,1,opt,name=compaction_level,json=compactionLevel,proto3" json:"compaction_level,omitempty"` - Shard uint32 `protobuf:"varint,2,opt,name=shard,proto3" json:"shard,omitempty"` - Tenant string `protobuf:"bytes,3,opt,name=tenant,proto3" json:"tenant,omitempty"` - Blocks []string `protobuf:"bytes,4,rep,name=blocks,proto3" json:"blocks,omitempty"` -} - -func (x *CompactionJobBlockQueue) Reset() { - *x = CompactionJobBlockQueue{} - if protoimpl.UnsafeEnabled { - mi := &file_experiment_metastore_compactionpb_compaction_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CompactionJobBlockQueue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CompactionJobBlockQueue) ProtoMessage() {} - -func (x *CompactionJobBlockQueue) ProtoReflect() protoreflect.Message { - mi := &file_experiment_metastore_compactionpb_compaction_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CompactionJobBlockQueue.ProtoReflect.Descriptor instead. 
-func (*CompactionJobBlockQueue) Descriptor() ([]byte, []int) { - return file_experiment_metastore_compactionpb_compaction_proto_rawDescGZIP(), []int{1} -} - -func (x *CompactionJobBlockQueue) GetCompactionLevel() uint32 { - if x != nil { - return x.CompactionLevel - } - return 0 -} - -func (x *CompactionJobBlockQueue) GetShard() uint32 { - if x != nil { - return x.Shard - } - return 0 -} - -func (x *CompactionJobBlockQueue) GetTenant() string { - if x != nil { - return x.Tenant - } - return "" -} - -func (x *CompactionJobBlockQueue) GetBlocks() []string { - if x != nil { - return x.Blocks - } - return nil -} - -var File_experiment_metastore_compactionpb_compaction_proto protoreflect.FileDescriptor - -var file_experiment_metastore_compactionpb_compaction_proto_rawDesc = []byte{ - 0x0a, 0x32, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, - 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x70, 0x62, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0xeb, 0x02, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, - 0x6f, 0x62, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x29, - 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, - 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x24, 0x0a, 0x0e, 0x72, 0x61, 0x66, - 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x0c, 0x72, 0x61, 0x66, 0x74, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, - 0x49, 0x64, 0x12, 0x34, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x73, - 0x65, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, - 0x41, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x12, 0x2e, - 0x0a, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x72, - 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6c, 0x61, 0x73, - 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x8a, - 0x01, 0x0a, 0x17, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x6f, 0x62, - 0x42, 0x6c, 0x6f, 
0x63, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, - 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x74, - 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x65, 0x6e, - 0x61, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x06, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x2a, 0xb7, 0x01, 0x0a, 0x10, - 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, - 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x43, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x49, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x47, - 0x52, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x43, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x53, 0x55, 0x43, 0x43, - 0x45, 0x53, 0x53, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x19, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x43, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, - 0x52, 0x45, 0x10, 0x03, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x43, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, - 0x4c, 0x45, 0x44, 0x10, 0x04, 0x42, 0xad, 0x01, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x63, 0x6f, - 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x61, 0x66, 0x61, 0x6e, 0x61, 0x2f, - 0x70, 0x79, 0x72, 0x6f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x65, 0x78, - 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, - 0x72, 0x65, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xa2, - 0x02, 0x03, 0x43, 0x58, 0x58, 0xaa, 0x02, 0x0a, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0xca, 0x02, 0x0a, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0xe2, - 0x02, 0x16, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5c, 0x47, 0x50, 0x42, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0a, 0x43, 0x6f, 0x6d, 0x70, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_experiment_metastore_compactionpb_compaction_proto_rawDescOnce sync.Once - file_experiment_metastore_compactionpb_compaction_proto_rawDescData = file_experiment_metastore_compactionpb_compaction_proto_rawDesc -) - -func file_experiment_metastore_compactionpb_compaction_proto_rawDescGZIP() []byte { - file_experiment_metastore_compactionpb_compaction_proto_rawDescOnce.Do(func() { - file_experiment_metastore_compactionpb_compaction_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_experiment_metastore_compactionpb_compaction_proto_rawDescData) - }) - return file_experiment_metastore_compactionpb_compaction_proto_rawDescData -} - -var file_experiment_metastore_compactionpb_compaction_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_experiment_metastore_compactionpb_compaction_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_experiment_metastore_compactionpb_compaction_proto_goTypes = []any{ - (CompactionStatus)(0), // 0: compaction.CompactionStatus - (*CompactionJob)(nil), // 1: compaction.CompactionJob - (*CompactionJobBlockQueue)(nil), // 2: compaction.CompactionJobBlockQueue -} -var file_experiment_metastore_compactionpb_compaction_proto_depIdxs = []int32{ - 0, // 0: compaction.CompactionJob.status:type_name -> compaction.CompactionStatus - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_experiment_metastore_compactionpb_compaction_proto_init() } -func file_experiment_metastore_compactionpb_compaction_proto_init() { - if File_experiment_metastore_compactionpb_compaction_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_experiment_metastore_compactionpb_compaction_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*CompactionJob); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_experiment_metastore_compactionpb_compaction_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*CompactionJobBlockQueue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_experiment_metastore_compactionpb_compaction_proto_rawDesc, - NumEnums: 1, - NumMessages: 2, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_experiment_metastore_compactionpb_compaction_proto_goTypes, - DependencyIndexes: file_experiment_metastore_compactionpb_compaction_proto_depIdxs, - EnumInfos: file_experiment_metastore_compactionpb_compaction_proto_enumTypes, - MessageInfos: file_experiment_metastore_compactionpb_compaction_proto_msgTypes, - }.Build() - File_experiment_metastore_compactionpb_compaction_proto = out.File - file_experiment_metastore_compactionpb_compaction_proto_rawDesc = nil - file_experiment_metastore_compactionpb_compaction_proto_goTypes = nil - file_experiment_metastore_compactionpb_compaction_proto_depIdxs = nil -} diff --git a/pkg/experiment/metastore/compactionpb/compaction.proto b/pkg/experiment/metastore/compactionpb/compaction.proto deleted file mode 100644 index 1c122f8784..0000000000 --- a/pkg/experiment/metastore/compactionpb/compaction.proto +++ /dev/null @@ -1,51 +0,0 @@ -syntax = "proto3"; - -package compaction; - -message CompactionJob { - // Unique name of the job. - string name = 1; - // List of the input blocks. - repeated string blocks = 2; - // Compaction level (all blocks are the same) - uint32 compaction_level = 3; - // The index of the raft command that changed the status of the job. - // Used as a fencing token in conjunction with the lease_expires_at - // field to manage ownership of the compaction job. 
Any access to the - // job must be guarded by the check: current_index >= raft_log_index. - // If the check fails, the access should be denied. - // - // The index is updated every time the job is assigned to a worker. - uint64 raft_log_index = 4; - // Shard the blocks belong to. - uint32 shard = 5; - // Optional, empty for compaction level 0. - string tenant_id = 6; - CompactionStatus status = 7; - // The time the compaction job lease expires. If a lease is expired, the - // job is considered abandoned and can be picked up by another worker. - // The expiration check should be done by comparing the timestamp of - // the raft log entry (command that accesses the job) with the value of - // this field. - // - // The lease is extended every time the owner reports a status update. - int64 lease_expires_at = 8; - // The number of failures when processing this job. Used for retries. - uint32 failures = 9; - string last_failure_reason = 10; -} - -enum CompactionStatus { - COMPACTION_STATUS_UNSPECIFIED = 0; - COMPACTION_STATUS_IN_PROGRESS = 1; - COMPACTION_STATUS_SUCCESS = 2; - COMPACTION_STATUS_FAILURE = 3; - COMPACTION_STATUS_CANCELLED = 4; -} - -message CompactionJobBlockQueue { - uint32 compaction_level = 1; - uint32 shard = 2; - string tenant = 3; - repeated string blocks = 4; -} diff --git a/pkg/experiment/metastore/compactionpb/compaction_vtproto.pb.go b/pkg/experiment/metastore/compactionpb/compaction_vtproto.pb.go deleted file mode 100644 index ebdff2b15b..0000000000 --- a/pkg/experiment/metastore/compactionpb/compaction_vtproto.pb.go +++ /dev/null @@ -1,690 +0,0 @@ -// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.6.0 -// source: experiment/metastore/compactionpb/compaction.proto - -package compactionpb - -import ( - fmt "fmt" - protohelpers "github.com/planetscale/vtprotobuf/protohelpers" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - io "io" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -func (m *CompactionJob) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CompactionJob) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *CompactionJob) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.LastFailureReason) > 0 { - i -= len(m.LastFailureReason) - copy(dAtA[i:], m.LastFailureReason) - i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LastFailureReason))) - i-- - dAtA[i] = 0x52 - } - if m.Failures != 0 { - i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Failures)) - i-- - dAtA[i] = 0x48 - } - if m.LeaseExpiresAt != 0 { - i = protohelpers.EncodeVarint(dAtA, i, uint64(m.LeaseExpiresAt)) - i-- - dAtA[i] = 0x40 - } - if m.Status != 0 { - i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Status)) - i-- - dAtA[i] = 0x38 - } - if len(m.TenantId) > 0 { - i -= len(m.TenantId) - copy(dAtA[i:], m.TenantId) - i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TenantId))) - i-- - dAtA[i] = 0x32 - } - if m.Shard != 0 { - i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Shard)) - i-- - dAtA[i] = 0x28 - } - if m.RaftLogIndex != 0 { - i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RaftLogIndex)) - i-- - dAtA[i] = 0x20 - } - if m.CompactionLevel != 0 { - i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CompactionLevel)) - i-- - dAtA[i] = 0x18 - } - if len(m.Blocks) > 0 { - for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Blocks[iNdEx]) - copy(dAtA[i:], m.Blocks[iNdEx]) - i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Blocks[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CompactionJobBlockQueue) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CompactionJobBlockQueue) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *CompactionJobBlockQueue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Blocks) > 0 { - for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Blocks[iNdEx]) - copy(dAtA[i:], m.Blocks[iNdEx]) - i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Blocks[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if len(m.Tenant) > 0 { - i -= len(m.Tenant) - copy(dAtA[i:], m.Tenant) - i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Tenant))) - i-- - dAtA[i] = 0x1a - } - if m.Shard != 0 { - i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Shard)) - i-- - dAtA[i] = 0x10 - } - if m.CompactionLevel != 0 { - i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CompactionLevel)) - i-- - dAtA[i] = 0x8 - } 
- return len(dAtA) - i, nil -} - -func (m *CompactionJob) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) - } - if len(m.Blocks) > 0 { - for _, s := range m.Blocks { - l = len(s) - n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) - } - } - if m.CompactionLevel != 0 { - n += 1 + protohelpers.SizeOfVarint(uint64(m.CompactionLevel)) - } - if m.RaftLogIndex != 0 { - n += 1 + protohelpers.SizeOfVarint(uint64(m.RaftLogIndex)) - } - if m.Shard != 0 { - n += 1 + protohelpers.SizeOfVarint(uint64(m.Shard)) - } - l = len(m.TenantId) - if l > 0 { - n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) - } - if m.Status != 0 { - n += 1 + protohelpers.SizeOfVarint(uint64(m.Status)) - } - if m.LeaseExpiresAt != 0 { - n += 1 + protohelpers.SizeOfVarint(uint64(m.LeaseExpiresAt)) - } - if m.Failures != 0 { - n += 1 + protohelpers.SizeOfVarint(uint64(m.Failures)) - } - l = len(m.LastFailureReason) - if l > 0 { - n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *CompactionJobBlockQueue) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.CompactionLevel != 0 { - n += 1 + protohelpers.SizeOfVarint(uint64(m.CompactionLevel)) - } - if m.Shard != 0 { - n += 1 + protohelpers.SizeOfVarint(uint64(m.Shard)) - } - l = len(m.Tenant) - if l > 0 { - n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) - } - if len(m.Blocks) > 0 { - for _, s := range m.Blocks { - l = len(s) - n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) - } - } - n += len(m.unknownFields) - return n -} - -func (m *CompactionJob) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CompactionJob: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CompactionJob: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return protohelpers.ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return protohelpers.ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return protohelpers.ErrInvalidLength - } - postIndex := iNdEx + 
intStringLen - if postIndex < 0 { - return protohelpers.ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Blocks = append(m.Blocks, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CompactionLevel", wireType) - } - m.CompactionLevel = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CompactionLevel |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RaftLogIndex", wireType) - } - m.RaftLogIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RaftLogIndex |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) - } - m.Shard = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Shard |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TenantId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return protohelpers.ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return protohelpers.ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TenantId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - m.Status = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Status |= CompactionStatus(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseExpiresAt", wireType) - } - m.LeaseExpiresAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.LeaseExpiresAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Failures", wireType) - } - m.Failures = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Failures |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastFailureReason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - 
b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return protohelpers.ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return protohelpers.ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LastFailureReason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := protohelpers.Skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return protohelpers.ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CompactionJobBlockQueue) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CompactionJobBlockQueue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CompactionJobBlockQueue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CompactionLevel", wireType) - } - m.CompactionLevel = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CompactionLevel |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) - } - m.Shard = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Shard |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tenant", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return protohelpers.ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return protohelpers.ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tenant = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return protohelpers.ErrInvalidLength - } - postIndex := 
iNdEx + intStringLen - if postIndex < 0 { - return protohelpers.ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Blocks = append(m.Blocks, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := protohelpers.Skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return protohelpers.ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/pkg/experiment/metastore/dlq/recovery.go b/pkg/experiment/metastore/dlq/recovery.go index 7adf1a908e..604284587f 100644 --- a/pkg/experiment/metastore/dlq/recovery.go +++ b/pkg/experiment/metastore/dlq/recovery.go @@ -20,11 +20,11 @@ import ( ) type RecoveryConfig struct { - Period time.Duration `yaml:"check_interval"` + Period time.Duration `yaml:"dlq_recovery_check_interval"` } func (c *RecoveryConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { - f.DurationVar(&c.Period, prefix+"check-interval", 15*time.Second, "Dead Letter Queue check interval.") + f.DurationVar(&c.Period, prefix+"dlq-recovery-check-interval", 15*time.Second, "Dead Letter Queue check interval.") } type LocalServer interface { diff --git a/pkg/experiment/metastore/fsm/boltdb.go b/pkg/experiment/metastore/fsm/boltdb.go index 3f07365e2e..136e926ee6 100644 --- a/pkg/experiment/metastore/fsm/boltdb.go +++ b/pkg/experiment/metastore/fsm/boltdb.go @@ -62,6 +62,7 @@ func (db *boltdb) open(readOnly bool) (err error) { } opts := *bbolt.DefaultOptions + // open is called with readOnly=true to verify the snapshot integrity. opts.ReadOnly = readOnly opts.PreLoadFreelist = !readOnly opts.InitialMmapSize = boltDBInitialMmapSize @@ -80,6 +81,9 @@ func (db *boltdb) open(readOnly bool) (err error) { func (db *boltdb) shutdown() { if db.boltdb != nil { + if err := db.boltdb.Sync(); err != nil { + _ = level.Error(db.logger).Log("msg", "failed to sync database", "err", err) + } if err := db.boltdb.Close(); err != nil { _ = level.Error(db.logger).Log("msg", "failed to close database", "err", err) } diff --git a/pkg/experiment/metastore/fsm/fsm.go b/pkg/experiment/metastore/fsm/fsm.go index 409a98a6f3..8ec7aabe99 100644 --- a/pkg/experiment/metastore/fsm/fsm.go +++ b/pkg/experiment/metastore/fsm/fsm.go @@ -2,6 +2,7 @@ package fsm import ( "context" + "encoding/binary" "fmt" "io" "strconv" @@ -46,6 +47,9 @@ type FSM struct { handlers map[RaftLogEntryType]handler restorers []StateRestorer + + appliedTerm uint64 + appliedIndex uint64 } type handler func(tx *bbolt.Tx, cmd *raft.Log, raw []byte) (proto.Message, error) @@ -101,6 +105,9 @@ func (fsm *FSM) init() (err error) { _ = tx.Rollback() } }() + if err = fsm.initRaftBucket(tx); err != nil { + return fmt.Errorf("failed to init raft bucket: %w", err) + } for _, r := range fsm.restorers { if err = r.Init(tx); err != nil { return err @@ -110,17 +117,14 @@ func (fsm *FSM) init() (err error) { } func (fsm *FSM) restore() error { + if err := fsm.db.boltdb.View(fsm.loadAppliedIndex); err != nil { + return fmt.Errorf("failed to load applied index: %w", err) + } + level.Info(fsm.logger).Log("msg", "restoring state", "term", fsm.appliedTerm, "applied_index", fsm.appliedIndex) g, _ := errgroup.WithContext(context.Background()) for _, r := range fsm.restorers { g.Go(func() error { - tx, err := fsm.db.boltdb.Begin(false) - if err != nil { - 
return err - } - defer func() { - _ = tx.Rollback() - }() - return r.Restore(tx) + return fsm.db.boltdb.View(r.Restore) }) } return g.Wait() @@ -198,6 +202,11 @@ func (fsm *FSM) applyCommand(cmd *raft.Log) any { if err := e.UnmarshalBinary(cmd.Data); err != nil { return errResponse(cmd, err) } + if cmd.Index <= fsm.appliedIndex { + // Skip already applied commands at WAL restore. + // Note that the 0 index is a noop and is never applied to FSM. + return Response{} + } cmdType := strconv.FormatUint(uint64(e.Type), 10) fsm.db.metrics.fsmApplyCommandSize.WithLabelValues(cmdType).Observe(float64(len(cmd.Data))) @@ -225,6 +234,10 @@ func (fsm *FSM) applyCommand(cmd *raft.Log) any { panic(fmt.Sprint("failed to apply command:", err)) } + if err = fsm.storeAppliedIndex(tx, cmd.Term, cmd.Index); err != nil { + panic(fmt.Sprint("failed to store applied index: %w", err)) + } + // We can't do anything about the failure at the database level, so we // panic here in a hope that other instances will handle the command. if err = tx.Commit(); err != nil { @@ -268,3 +281,50 @@ func (fsm *FSM) Shutdown() { fsm.db.shutdown() } } + +var ( + raftBucketName = []byte("raft") + appliedIndexKey = []byte("term.applied_index") + // Value is encoded as [8]term + [8]index. +) + +func (fsm *FSM) initRaftBucket(tx *bbolt.Tx) error { + b := tx.Bucket(raftBucketName) + if b != nil { + return nil + } + // If no bucket exists, we create a stub with 0 values. + if _, err := tx.CreateBucket(raftBucketName); err != nil { + return err + } + return fsm.storeAppliedIndex(tx, 0, 0) +} + +func (fsm *FSM) storeAppliedIndex(tx *bbolt.Tx, term, index uint64) error { + b := tx.Bucket(raftBucketName) + if b == nil { + return bbolt.ErrBucketNotFound + } + v := make([]byte, 16) + binary.BigEndian.PutUint64(v[0:8], term) + binary.BigEndian.PutUint64(v[8:16], index) + fsm.appliedTerm = term + fsm.appliedIndex = index + return b.Put(appliedIndexKey, v) +} + +var errAppliedIndexInvalid = fmt.Errorf("invalid applied index") + +func (fsm *FSM) loadAppliedIndex(tx *bbolt.Tx) error { + b := tx.Bucket(raftBucketName) + if b == nil { + return bbolt.ErrBucketNotFound + } + v := b.Get(appliedIndexKey) + if len(v) < 16 { + return errAppliedIndexInvalid + } + fsm.appliedTerm = binary.BigEndian.Uint64(v[0:8]) + fsm.appliedIndex = binary.BigEndian.Uint64(v[8:16]) + return nil +} diff --git a/pkg/experiment/metastore/index/index.go b/pkg/experiment/metastore/index/index.go index 9e14004fa9..90dd36becf 100644 --- a/pkg/experiment/metastore/index/index.go +++ b/pkg/experiment/metastore/index/index.go @@ -12,22 +12,28 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/oklog/ulid" - "github.com/prometheus/common/model" "go.etcd.io/bbolt" "golang.org/x/sync/errgroup" metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/pkg/experiment/metastore/index/store" ) +var ErrBlockExists = fmt.Errorf("block already exists") + type Store interface { - ListPartitions(tx *bbolt.Tx) []PartitionKey - ListShards(tx *bbolt.Tx, p PartitionKey) []uint32 - ListTenants(tx *bbolt.Tx, p PartitionKey, shard uint32) []string - ListBlocks(tx *bbolt.Tx, p PartitionKey, shard uint32, tenant string) []*metastorev1.BlockMeta + CreateBuckets(*bbolt.Tx) error + StoreBlock(*bbolt.Tx, store.PartitionKey, *metastorev1.BlockMeta) error + DeleteBlockList(*bbolt.Tx, store.PartitionKey, *metastorev1.BlockList) error + + ListPartitions(*bbolt.Tx) []store.PartitionKey + ListShards(*bbolt.Tx, store.PartitionKey) []uint32 + 
ListTenants(tx *bbolt.Tx, p store.PartitionKey, shard uint32) []string + ListBlocks(tx *bbolt.Tx, p store.PartitionKey, shard uint32, tenant string) []*metastorev1.BlockMeta } type Index struct { - Config *Config + config *Config partitionMu sync.Mutex loadedPartitions map[cacheKey]*indexPartition @@ -66,7 +72,7 @@ type indexShard struct { } type cacheKey struct { - partitionKey PartitionKey + partitionKey store.PartitionKey tenant string } @@ -92,18 +98,12 @@ func NewIndex(logger log.Logger, store Store, cfg *Config) *Index { allPartitions: make([]*PartitionMeta, 0), store: store, logger: logger, - Config: cfg, + config: cfg, } } -func (i *Index) Init(tx *bbolt.Tx) error { - _, err := tx.CreateBucketIfNotExists(partitionBucketNameBytes) - return err -} - -func (i *Index) Restore(tx *bbolt.Tx) error { - i.LoadPartitions(tx) - return nil +func NewStore() *store.IndexStore { + return store.NewIndexStore() } // LoadPartitions reads all partitions from the backing store and loads the recent ones in memory. @@ -133,7 +133,7 @@ func (i *Index) LoadPartitions(tx *bbolt.Tx) { i.sortPartitions() } -func (i *Index) loadPartitionMeta(tx *bbolt.Tx, key PartitionKey) *PartitionMeta { +func (i *Index) loadPartitionMeta(tx *bbolt.Tx, key store.PartitionKey) *PartitionMeta { t, dur, _ := key.Parse() pMeta := &PartitionMeta{ Key: key, @@ -227,35 +227,8 @@ func (i *Index) getOrLoadPartition(tx *bbolt.Tx, meta *PartitionMeta, tenant str return p } -// CreatePartitionKey creates a partition key for a block. It is meant to be used for newly inserted blocks, as it relies -// on the index's currently configured partition duration to create the key. -// -// Note: Using this for existing blocks following a partition duration change can produce the wrong key. Callers should -// verify that the returned partition actually contains the block. -func (i *Index) CreatePartitionKey(blockId string) PartitionKey { - t := ulid.Time(ulid.MustParse(blockId).Time()).UTC() - - var b strings.Builder - b.Grow(16) - - year, month, day := t.Date() - b.WriteString(fmt.Sprintf("%04d%02d%02d", year, month, day)) - - partitionDuration := i.Config.PartitionDuration - if partitionDuration < 24*time.Hour { - hour := (t.Hour() / int(partitionDuration.Hours())) * int(partitionDuration.Hours()) - b.WriteString(fmt.Sprintf("T%02d", hour)) - } - - mDuration := model.Duration(partitionDuration) - b.WriteString(".") - b.WriteString(mDuration.String()) - - return PartitionKey(b.String()) -} - // findPartitionMeta retrieves the partition meta for the given key. -func (i *Index) findPartitionMeta(key PartitionKey) *PartitionMeta { +func (i *Index) findPartitionMeta(key store.PartitionKey) *PartitionMeta { for _, p := range i.allPartitions { if p.Key == key { return p @@ -264,12 +237,22 @@ func (i *Index) findPartitionMeta(key PartitionKey) *PartitionMeta { return nil } -// InsertBlock is the primary way for adding blocks to the index. 
-func (i *Index) InsertBlock(tx *bbolt.Tx, b *metastorev1.BlockMeta) { +func (i *Index) InsertBlock(tx *bbolt.Tx, b *metastorev1.BlockMeta) error { i.partitionMu.Lock() defer i.partitionMu.Unlock() + if x := i.findBlock(tx, b.Shard, b.TenantId, b.Id); x != nil { + return ErrBlockExists + } + i.insertBlock(tx, b) + pk := store.CreatePartitionKey(b.Id, i.config.PartitionDuration) + return i.store.StoreBlock(tx, pk, b) +} +func (i *Index) InsertBlockNoCheckNoPersist(tx *bbolt.Tx, b *metastorev1.BlockMeta) error { + i.partitionMu.Lock() + defer i.partitionMu.Unlock() i.insertBlock(tx, b) + return nil } // insertBlock is the underlying implementation for inserting blocks. It is the caller's responsibility to enforce safe @@ -291,7 +274,7 @@ func (i *Index) insertBlock(tx *bbolt.Tx, b *metastorev1.BlockMeta) { } func (i *Index) getOrCreatePartitionMeta(b *metastorev1.BlockMeta) *PartitionMeta { - key := i.CreatePartitionKey(b.Id) + key := store.CreatePartitionKey(b.Id, i.config.PartitionDuration) meta := i.findPartitionMeta(key) if meta == nil { @@ -318,14 +301,68 @@ func (i *Index) getOrCreatePartitionMeta(b *metastorev1.BlockMeta) *PartitionMet return meta } +func (i *Index) getOrCreatePartitionMetaForCacheKey(k cacheKey) *PartitionMeta { + meta := i.findPartitionMeta(k.partitionKey) + if meta == nil { + ts, duration, _ := k.partitionKey.Parse() + meta = &PartitionMeta{ + Key: k.partitionKey, + Ts: ts, + Duration: duration, + Tenants: make([]string, 0), + tenantMap: make(map[string]struct{}), + } + i.allPartitions = append(i.allPartitions, meta) + i.sortPartitions() + } + return meta +} + // FindBlock tries to retrieve an existing block from the index. It will load the corresponding partition if it is not // already loaded. Returns nil if the block cannot be found. 
func (i *Index) FindBlock(tx *bbolt.Tx, shardNum uint32, tenant string, blockId string) *metastorev1.BlockMeta { - // first try the currently mapped partition - key := i.CreatePartitionKey(blockId) + i.partitionMu.Lock() + defer i.partitionMu.Unlock() + return i.findBlock(tx, shardNum, tenant, blockId) +} + +func (i *Index) FindBlocks(tx *bbolt.Tx, list *metastorev1.BlockList) []*metastorev1.BlockMeta { i.partitionMu.Lock() defer i.partitionMu.Unlock() + pk := make(map[store.PartitionKey]struct{}) + left := make(map[string]struct{}) + for _, block := range list.Blocks { + pk[store.CreatePartitionKey(block, i.config.PartitionDuration)] = struct{}{} + left[block] = struct{}{} + } + + found := make([]*metastorev1.BlockMeta, 0, len(list.Blocks)) + for k := range pk { + meta := i.findPartitionMeta(k) + if meta == nil { + continue + } + p := i.getOrLoadPartition(tx, meta, list.Tenant) + s, _ := p.shards[list.Shard] + if s == nil { + continue + } + for b := range left { + if block := s.blocks[b]; block != nil { + found = append(found, block) + delete(left, b) + } + } + } + + return found +} + +func (i *Index) findBlock(tx *bbolt.Tx, shardNum uint32, tenant string, blockId string) *metastorev1.BlockMeta { + key := store.CreatePartitionKey(blockId, i.config.PartitionDuration) + + // first try the currently mapped partition b := i.findBlockInPartition(tx, key, shardNum, tenant, blockId) if b != nil { return b @@ -344,7 +381,7 @@ func (i *Index) FindBlock(tx *bbolt.Tx, shardNum uint32, tenant string, blockId return nil } -func (i *Index) findBlockInPartition(tx *bbolt.Tx, key PartitionKey, shard uint32, tenant string, blockId string) *metastorev1.BlockMeta { +func (i *Index) findBlockInPartition(tx *bbolt.Tx, key store.PartitionKey, shard uint32, tenant string, blockId string) *metastorev1.BlockMeta { meta := i.findPartitionMeta(key) if meta == nil { return nil @@ -370,8 +407,8 @@ func (i *Index) findBlockInPartition(tx *bbolt.Tx, key PartitionKey, shard uint3 func (i *Index) FindBlocksInRange(tx *bbolt.Tx, start, end int64, tenants map[string]struct{}) []*metastorev1.BlockMeta { i.partitionMu.Lock() defer i.partitionMu.Unlock() - startWithLookaround := start - i.Config.QueryLookaroundPeriod.Milliseconds() - endWithLookaround := end + i.Config.QueryLookaroundPeriod.Milliseconds() + startWithLookaround := start - i.config.QueryLookaroundPeriod.Milliseconds() + endWithLookaround := end + i.config.QueryLookaroundPeriod.Milliseconds() blocks := make([]*metastorev1.BlockMeta, 0) @@ -417,23 +454,78 @@ func (i *Index) collectTenantBlocks(p *indexPartition, start, end int64) []*meta // ReplaceBlocks removes source blocks from the index and inserts replacement blocks into the index. The intended usage // is for block compaction. The replacement blocks could be added to the same or a different partition. 
-func (i *Index) ReplaceBlocks(tx *bbolt.Tx, sources []string, sourceShard uint32, sourceTenant string, replacements []*metastorev1.BlockMeta) { +func (i *Index) ReplaceBlocks(tx *bbolt.Tx, compacted *metastorev1.CompactedBlocks) error { i.partitionMu.Lock() defer i.partitionMu.Unlock() + if err := i.insertBlocks(tx, compacted.NewBlocks); err != nil { + return err + } + return i.deleteBlockList(tx, compacted.SourceBlocks) +} + +func (i *Index) ReplaceBlocksNoCheckNoPersist(tx *bbolt.Tx, compacted *metastorev1.CompactedBlocks) error { + i.partitionMu.Lock() + defer i.partitionMu.Unlock() + for _, b := range compacted.NewBlocks { + i.insertBlock(tx, b) + } + source := compacted.SourceBlocks + for _, b := range source.Blocks { + i.deleteBlock(source.Shard, source.Tenant, b) + } + return nil +} - for _, newBlock := range replacements { - i.insertBlock(tx, newBlock) +func (i *Index) insertBlocks(tx *bbolt.Tx, blocks []*metastorev1.BlockMeta) error { + for _, b := range blocks { + k := store.CreatePartitionKey(b.Id, i.config.PartitionDuration) + i.insertBlock(tx, b) + if err := i.store.StoreBlock(tx, k, b); err != nil { + return err + } } + return nil +} - for _, sourceBlock := range sources { - i.deleteBlock(sourceShard, sourceTenant, sourceBlock) +func (i *Index) deleteBlockList(tx *bbolt.Tx, list *metastorev1.BlockList) error { + partitions := make(map[store.PartitionKey]*metastorev1.BlockList) + for _, block := range list.Blocks { + k := store.CreatePartitionKey(block, i.config.PartitionDuration) + v := partitions[k] + if v == nil { + v = &metastorev1.BlockList{ + Shard: list.Shard, + Tenant: list.Tenant, + Blocks: make([]string, 0, len(list.Blocks)), + } + partitions[k] = v + } + v.Blocks = append(v.Blocks, block) } + for k, partitioned := range partitions { + if err := i.store.DeleteBlockList(tx, k, partitioned); err != nil { + return err + } + ck := cacheKey{partitionKey: k, tenant: list.Tenant} + loaded := i.loadedPartitions[ck] + if loaded == nil { + continue + } + shard := loaded.shards[partitioned.Shard] + if shard == nil { + continue + } + for _, b := range partitioned.Blocks { + delete(shard.blocks, b) + } + } + return nil } // deleteBlock deletes a block from the index. It is the caller's responsibility to enforce safe concurrent access. 
func (i *Index) deleteBlock(shard uint32, tenant string, blockId string) { // first try the currently mapped partition - key := i.CreatePartitionKey(blockId) + key := store.CreatePartitionKey(blockId, i.config.PartitionDuration) if ok := i.tryDelete(key, shard, tenant, blockId); ok { return } @@ -450,7 +542,7 @@ func (i *Index) deleteBlock(shard uint32, tenant string, blockId string) { } } -func (i *Index) tryDelete(key PartitionKey, shard uint32, tenant string, blockId string) bool { +func (i *Index) tryDelete(key store.PartitionKey, shard uint32, tenant string, blockId string) bool { meta := i.findPartitionMeta(key) if meta == nil { return false @@ -497,7 +589,7 @@ func (i *Index) unloadPartitions() { excessPerTenant := make(map[string]int) for k, p := range i.loadedPartitions { tenantPartitions[k.tenant] = append(tenantPartitions[k.tenant], p) - if len(tenantPartitions[k.tenant]) > i.Config.PartitionCacheSize { + if len(tenantPartitions[k.tenant]) > i.config.PartitionCacheSize { excessPerTenant[k.tenant]++ } } @@ -528,3 +620,12 @@ func (i *Index) unloadPartitions() { } } } + +func (i *Index) Init(tx *bbolt.Tx) error { + return i.store.CreateBuckets(tx) +} + +func (i *Index) Restore(tx *bbolt.Tx) error { + i.LoadPartitions(tx) + return nil +} diff --git a/pkg/experiment/metastore/index/index_test.go b/pkg/experiment/metastore/index/index_test.go index d87ab16f7f..f293ab506a 100644 --- a/pkg/experiment/metastore/index/index_test.go +++ b/pkg/experiment/metastore/index/index_test.go @@ -8,13 +8,15 @@ import ( "time" "github.com/oklog/ulid" - "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "go.etcd.io/bbolt" metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" "github.com/grafana/pyroscope/pkg/experiment/metastore/index" + "github.com/grafana/pyroscope/pkg/experiment/metastore/index/store" + "github.com/grafana/pyroscope/pkg/test" "github.com/grafana/pyroscope/pkg/test/mocks/mockindex" "github.com/grafana/pyroscope/pkg/util" ) @@ -36,8 +38,8 @@ func TestIndex_FindBlocksInRange(t *testing.T) { createBlock("20240923T09.1h", 0), createBlock("20240923T10.1h", 0), }, - queryStart: createTime("2024-09-23T08:00:00.000Z"), - queryEnd: createTime("2024-09-23T09:00:00.000Z"), + queryStart: test.Time("2024-09-23T08:00:00.000Z"), + queryEnd: test.Time("2024-09-23T09:00:00.000Z"), want: 2, }, { @@ -49,8 +51,8 @@ func TestIndex_FindBlocksInRange(t *testing.T) { createBlock("20240923T09.1h", 0), createBlock("20240923T10.1h", 0), }, - queryStart: createTime("2024-09-23T04:00:00.000Z"), - queryEnd: createTime("2024-09-23T05:00:00.000Z"), + queryStart: test.Time("2024-09-23T04:00:00.000Z"), + queryEnd: test.Time("2024-09-23T05:00:00.000Z"), want: 0, }, { @@ -63,8 +65,8 @@ func TestIndex_FindBlocksInRange(t *testing.T) { createBlock("20240923T08.1h", -3*time.Hour), // // technically in range but we will not look here createBlock("20240923T10.1h", 0), }, - queryStart: createTime("2024-09-23T05:00:00.000Z"), - queryEnd: createTime("2024-09-23T06:00:00.000Z"), + queryStart: test.Time("2024-09-23T05:00:00.000Z"), + queryEnd: test.Time("2024-09-23T06:00:00.000Z"), want: 3, }, { @@ -77,23 +79,23 @@ func TestIndex_FindBlocksInRange(t *testing.T) { createBlock("20240923T08.1h", 1*time.Hour), // in range createBlock("20240923T10.1h", 0), }, - queryStart: createTime("2024-09-23T08:00:00.000Z"), - queryEnd: createTime("2024-09-23T09:00:00.000Z"), + queryStart: test.Time("2024-09-23T08:00:00.000Z"), + 
queryEnd: test.Time("2024-09-23T09:00:00.000Z"), want: 3, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - store := mockindex.NewMockStore(t) - store.On("ListShards", mock.Anything, mock.Anything).Return([]uint32{}) - i := index.NewIndex(util.Logger, store, &index.Config{ + mockStore := mockindex.NewMockStore(t) + mockStore.On("ListShards", mock.Anything, mock.Anything).Return([]uint32{}) + i := index.NewIndex(util.Logger, mockStore, &index.Config{ PartitionDuration: time.Hour, PartitionCacheSize: 24, QueryLookaroundPeriod: time.Hour, }) for _, b := range tt.blocks { - i.InsertBlock(nil, b) + i.InsertBlockNoCheckNoPersist(nil, b) } tenantMap := map[string]struct{}{"tenant-1": {}} found := i.FindBlocksInRange(nil, tt.queryStart, tt.queryEnd, tenantMap) @@ -106,33 +108,48 @@ func TestIndex_FindBlocksInRange(t *testing.T) { } }) } +} +func TestIndex_FindBlocks(t *testing.T) { + mockStore := mockindex.NewMockStore(t) + mockStore.On("ListShards", mock.Anything, mock.Anything).Return([]uint32{}) + i := index.NewIndex(util.Logger, mockStore, &index.Config{ + PartitionDuration: time.Hour, + PartitionCacheSize: 24, + QueryLookaroundPeriod: time.Hour, + }) + a := test.ULID("2024-09-21T08:00:00.123Z") + b := test.ULID("2024-09-22T08:00:00.123Z") + c := test.ULID("2024-09-23T08:00:00.123Z") + i.InsertBlockNoCheckNoPersist(nil, &metastorev1.BlockMeta{Id: a}) + i.InsertBlockNoCheckNoPersist(nil, &metastorev1.BlockMeta{Id: b}) + assert.Len(t, i.FindBlocks(nil, &metastorev1.BlockList{Blocks: []string{a, b, c}}), 2) } -func mockPartition(store *mockindex.MockStore, key index.PartitionKey, blocks []*metastorev1.BlockMeta) { +func mockPartition(store *mockindex.MockStore, key store.PartitionKey, blocks []*metastorev1.BlockMeta) { store.On("ListShards", mock.Anything, key).Return([]uint32{0}).Maybe() store.On("ListTenants", mock.Anything, key, uint32(0)).Return([]string{""}).Maybe() store.On("ListBlocks", mock.Anything, key, uint32(0), "").Return(blocks).Maybe() } func TestIndex_ForEachPartition(t *testing.T) { - store := mockindex.NewMockStore(t) - i := index.NewIndex(util.Logger, store, &index.Config{PartitionDuration: time.Hour}) + mockStore := mockindex.NewMockStore(t) + i := index.NewIndex(util.Logger, mockStore, &index.Config{PartitionDuration: time.Hour}) - keys := []index.PartitionKey{ + keys := []store.PartitionKey{ "20240923T06.1h", "20240923T07.1h", "20240923T08.1h", "20240923T09.1h", "20240923T10.1h", } - store.On("ListPartitions", mock.Anything).Return(keys) + mockStore.On("ListPartitions", mock.Anything).Return(keys) for _, key := range keys { - mockPartition(store, key, nil) + mockPartition(mockStore, key, nil) } i.LoadPartitions(nil) - visited := make(map[index.PartitionKey]struct{}) + visited := make(map[store.PartitionKey]struct{}) var mu sync.Mutex err := i.ForEachPartition(context.Background(), func(meta *index.PartitionMeta) error { mu.Lock() @@ -145,91 +162,34 @@ func TestIndex_ForEachPartition(t *testing.T) { require.Len(t, visited, 5) } -func TestIndex_GetPartitionKey(t *testing.T) { - tests := []struct { - name string - duration time.Duration - blockId string - want index.PartitionKey - }{ - { - name: "1d", - duration: createDuration("1d"), - blockId: createUlidString("2024-07-15T16:13:43.245Z"), - want: index.PartitionKey("20240715.1d"), - }, - { - name: "1h at start of the window", - duration: createDuration("1h"), - blockId: createUlidString("2024-07-15T16:00:00.000Z"), - want: index.PartitionKey("20240715T16.1h"), - }, - { - name: "1h in the middle of the 
window", - duration: createDuration("1h"), - blockId: createUlidString("2024-07-15T16:13:43.245Z"), - want: index.PartitionKey("20240715T16.1h"), - }, - { - name: "1h at the end of the window", - duration: createDuration("1h"), - blockId: createUlidString("2024-07-15T16:59:59.999Z"), - want: index.PartitionKey("20240715T16.1h"), - }, - { - name: "6h duration at midnight", - duration: createDuration("6h"), - blockId: createUlidString("2024-07-15T00:00:00.000Z"), - want: index.PartitionKey("20240715T00.6h"), - }, - { - name: "6h at the middle of a window", - duration: createDuration("6h"), - blockId: createUlidString("2024-07-15T15:13:43.245Z"), - want: index.PartitionKey("20240715T12.6h"), - }, - { - name: "6h at the end of the window", - duration: createDuration("6h"), - blockId: createUlidString("2024-07-15T23:59:59.999Z"), - want: index.PartitionKey("20240715T18.6h"), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - i := index.NewIndex(util.Logger, mockindex.NewMockStore(t), &index.Config{PartitionDuration: tt.duration}) - assert.Equalf(t, tt.want, i.CreatePartitionKey(tt.blockId), "CreatePartitionKey(%v)", tt.blockId) - }) - } -} - func TestIndex_InsertBlock(t *testing.T) { - store := mockindex.NewMockStore(t) - store.On("ListShards", mock.Anything, mock.Anything).Return([]uint32{}) - i := index.NewIndex(util.Logger, store, &index.Config{PartitionDuration: time.Hour, PartitionCacheSize: 1}) + mockStore := mockindex.NewMockStore(t) + mockStore.On("ListShards", mock.Anything, mock.Anything).Return([]uint32{}) + i := index.NewIndex(util.Logger, mockStore, &index.Config{PartitionDuration: time.Hour, PartitionCacheSize: 1}) block := &metastorev1.BlockMeta{ - Id: createUlidString("2024-09-23T08:00:00.123Z"), + Id: test.ULID("2024-09-23T08:00:00.123Z"), TenantId: "tenant-1", - MinTime: createTime("2024-09-23T08:00:00.000Z"), - MaxTime: createTime("2024-09-23T08:05:00.000Z"), + MinTime: test.Time("2024-09-23T08:00:00.000Z"), + MaxTime: test.Time("2024-09-23T08:05:00.000Z"), } - i.InsertBlock(nil, block) + i.InsertBlockNoCheckNoPersist(nil, block) require.NotNil(t, i.FindBlock(nil, 0, "tenant-1", block.Id)) - blocks := i.FindBlocksInRange(nil, createTime("2024-09-23T07:00:00.000Z"), createTime("2024-09-23T09:00:00.000Z"), map[string]struct{}{"tenant-1": {}}) + blocks := i.FindBlocksInRange(nil, test.Time("2024-09-23T07:00:00.000Z"), test.Time("2024-09-23T09:00:00.000Z"), map[string]struct{}{"tenant-1": {}}) require.Len(t, blocks, 1) require.Equal(t, block, blocks[0]) // inserting the block again is a noop - i.InsertBlock(nil, block) - blocks = i.FindBlocksInRange(nil, createTime("2024-09-23T07:00:00.000Z"), createTime("2024-09-23T09:00:00.000Z"), map[string]struct{}{"tenant-1": {}}) + i.InsertBlockNoCheckNoPersist(nil, block) + blocks = i.FindBlocksInRange(nil, test.Time("2024-09-23T07:00:00.000Z"), test.Time("2024-09-23T09:00:00.000Z"), map[string]struct{}{"tenant-1": {}}) require.Len(t, blocks, 1) require.Equal(t, block, blocks[0]) } func TestIndex_LoadPartitions(t *testing.T) { - store := mockindex.NewMockStore(t) - i := index.NewIndex(util.Logger, store, &index.Config{PartitionDuration: time.Hour, PartitionCacheSize: 1}) + mockStore := mockindex.NewMockStore(t) + config := &index.Config{PartitionDuration: time.Hour, PartitionCacheSize: 1} + i := index.NewIndex(util.Logger, mockStore, config) blocks := make([]*metastorev1.BlockMeta, 0, 420) for i := 0; i < 420; i++ { @@ -240,11 +200,11 @@ func TestIndex_LoadPartitions(t *testing.T) { blocks = append(blocks, block) 
} - partitionKey := i.CreatePartitionKey(blocks[0].Id) - store.On("ListPartitions", mock.Anything).Return([]index.PartitionKey{partitionKey}) - store.On("ListShards", mock.Anything, mock.Anything).Return([]uint32{0}) - store.On("ListTenants", mock.Anything, mock.Anything, mock.Anything).Return([]string{""}) - store.On("ListBlocks", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(blocks) + partitionKey := store.CreatePartitionKey(blocks[0].Id, config.PartitionDuration) + mockStore.On("ListPartitions", mock.Anything).Return([]store.PartitionKey{partitionKey}) + mockStore.On("ListShards", mock.Anything, mock.Anything).Return([]uint32{0}) + mockStore.On("ListTenants", mock.Anything, mock.Anything, mock.Anything).Return([]string{""}) + mockStore.On("ListBlocks", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(blocks) // restore from store i.LoadPartitions(nil) @@ -255,61 +215,71 @@ func TestIndex_LoadPartitions(t *testing.T) { } func TestIndex_ReplaceBlocks(t *testing.T) { - store := mockindex.NewMockStore(t) - store.On("ListShards", mock.Anything, mock.Anything).Return([]uint32{}) - i := index.NewIndex(util.Logger, store, &index.DefaultConfig) + mockStore := mockindex.NewMockStore(t) + mockStore.On("ListShards", mock.Anything, mock.Anything).Return([]uint32{}) + i := index.NewIndex(util.Logger, mockStore, &index.DefaultConfig) b1 := &metastorev1.BlockMeta{ - Id: createUlidString("2024-09-23T08:00:00.123Z"), + Id: test.ULID("2024-09-23T08:00:00.123Z"), } - i.InsertBlock(nil, b1) + i.InsertBlockNoCheckNoPersist(nil, b1) b2 := &metastorev1.BlockMeta{ - Id: createUlidString("2024-09-23T08:00:00.123Z"), + Id: test.ULID("2024-09-23T08:00:00.123Z"), } - i.InsertBlock(nil, b2) + i.InsertBlockNoCheckNoPersist(nil, b2) replacement := &metastorev1.BlockMeta{ - Id: createUlidString("2024-09-23T08:00:00.123Z"), + Id: test.ULID("2024-09-23T08:00:00.123Z"), CompactionLevel: 1, TenantId: "tenant-1", } - i.ReplaceBlocks(nil, []string{b1.Id, b2.Id}, 0, "", []*metastorev1.BlockMeta{replacement}) + compacted := &metastorev1.CompactedBlocks{ + SourceBlocks: &metastorev1.BlockList{ + Tenant: "", + Shard: 0, + Blocks: []string{b1.Id, b2.Id}, + }, + NewBlocks: []*metastorev1.BlockMeta{replacement}, + } + + require.NoError(t, i.ReplaceBlocksNoCheckNoPersist(nil, compacted)) require.Nil(t, i.FindBlock(nil, 0, "", b1.Id)) require.Nil(t, i.FindBlock(nil, 0, "", b2.Id)) require.NotNil(t, i.FindBlock(nil, 0, "tenant-1", replacement.Id)) } func TestIndex_DurationChange(t *testing.T) { - store := mockindex.NewMockStore(t) - store.On("ListShards", mock.Anything, mock.Anything).Return([]uint32{}) - i := index.NewIndex(util.Logger, store, &index.Config{PartitionDuration: 24 * time.Hour, PartitionCacheSize: 1}) + mockStore := mockindex.NewMockStore(t) + mockStore.On("ListShards", mock.Anything, mock.Anything).Return([]uint32{}) + config := &index.Config{PartitionDuration: 24 * time.Hour, PartitionCacheSize: 1} + i := index.NewIndex(util.Logger, mockStore, config) b := &metastorev1.BlockMeta{ - Id: createUlidString("2024-09-23T08:00:00.123Z"), + Id: test.ULID("2024-09-23T08:00:00.123Z"), } - i.InsertBlock(nil, b) + i.InsertBlockNoCheckNoPersist(nil, b) require.NotNil(t, i.FindBlock(nil, 0, "", b.Id)) - i.Config.PartitionDuration = time.Hour + config.PartitionDuration = time.Hour require.NotNil(t, i.FindBlock(nil, 0, "", b.Id)) } func TestIndex_UnloadPartitions(t *testing.T) { - store := mockindex.NewMockStore(t) - i := index.NewIndex(util.Logger, store, &index.Config{PartitionDuration: 
time.Hour, PartitionCacheSize: 3}) + mockStore := mockindex.NewMockStore(t) + i := index.NewIndex(util.Logger, mockStore, &index.Config{PartitionDuration: time.Hour, PartitionCacheSize: 3}) - keys := []index.PartitionKey{ + keys := []store.PartitionKey{ "20240923T06.1h", "20240923T07.1h", "20240923T08.1h", "20240923T09.1h", "20240923T10.1h", } - store.On("ListPartitions", mock.Anything).Return(keys) + mockStore.On("ListPartitions", mock.Anything).Return(keys) for _, key := range keys { - mockPartition(store, key, nil) + mockPartition(mockStore, key, nil) } i.LoadPartitions(nil) - require.True(t, store.AssertNumberOfCalls(t, "ListShards", 5)) + require.True(t, mockStore.AssertNumberOfCalls(t, "ListShards", 5)) for _, key := range keys { start, _, _ := key.Parse() @@ -318,44 +288,94 @@ func TestIndex_UnloadPartitions(t *testing.T) { } } // multiple reads cause a single store access - require.True(t, store.AssertNumberOfCalls(t, "ListShards", 10)) + require.True(t, mockStore.AssertNumberOfCalls(t, "ListShards", 10)) for c := 0; c < 10; c++ { - i.FindBlocksInRange(nil, createTime("2024-09-23T08:00:00.000Z"), createTime("2024-09-23T08:05:00.000Z"), map[string]struct{}{"": {}}) + i.FindBlocksInRange(nil, test.Time("2024-09-23T08:00:00.000Z"), test.Time("2024-09-23T08:05:00.000Z"), map[string]struct{}{"": {}}) } // this partition is still loaded in memory - require.True(t, store.AssertNumberOfCalls(t, "ListShards", 10)) + require.True(t, mockStore.AssertNumberOfCalls(t, "ListShards", 10)) for c := 0; c < 10; c++ { - i.FindBlocksInRange(nil, createTime("2024-09-23T06:00:00.000Z"), createTime("2024-09-23T06:05:00.000Z"), map[string]struct{}{"": {}}) + i.FindBlocksInRange(nil, test.Time("2024-09-23T06:00:00.000Z"), test.Time("2024-09-23T06:05:00.000Z"), map[string]struct{}{"": {}}) } // this partition was unloaded - require.True(t, store.AssertNumberOfCalls(t, "ListShards", 11)) -} - -func createUlidString(t string) string { - parsed, _ := time.Parse(time.RFC3339, t) - l := ulid.MustNew(ulid.Timestamp(parsed), rand.Reader) - return l.String() -} - -func createDuration(d string) time.Duration { - parsed, _ := model.ParseDuration(d) - return time.Duration(parsed) -} - -func createTime(t string) int64 { - ts, _ := time.Parse(time.RFC3339, t) - return ts.UnixMilli() + require.True(t, mockStore.AssertNumberOfCalls(t, "ListShards", 11)) } func createBlock(key string, offset time.Duration) *metastorev1.BlockMeta { - pKey := index.PartitionKey(key) + pKey := store.PartitionKey(key) ts, _, _ := pKey.Parse() return &metastorev1.BlockMeta{ - Id: createUlidString(ts.Format(time.RFC3339)), + Id: test.ULID(ts.Format(time.RFC3339)), MinTime: ts.Add(offset).UnixMilli(), MaxTime: ts.Add(offset).Add(5 * time.Minute).UnixMilli(), TenantId: "tenant-1", } } + +func TestReplaceBlocks_Persistence(t *testing.T) { + db := test.BoltDB(t) + c := &index.Config{ + PartitionDuration: 24 * time.Hour, + PartitionCacheSize: 7, + QueryLookaroundPeriod: time.Hour, + } + md1 := &metastorev1.BlockMeta{ + Id: test.ULID("2024-09-22T08:00:00.123Z"), + Shard: 3, + CompactionLevel: 0, + TenantId: "", + } + md2 := &metastorev1.BlockMeta{ + Id: test.ULID("2024-09-22T08:01:00.123Z"), + Shard: 3, + CompactionLevel: 0, + TenantId: "", + } + md3 := &metastorev1.BlockMeta{ + Id: test.ULID("2024-09-25T09:00:00.123Z"), + Shard: 3, + CompactionLevel: 1, + TenantId: "x1", + } + md4 := &metastorev1.BlockMeta{ + Id: test.ULID("2024-09-25T09:01:00.123Z"), + Shard: 3, + CompactionLevel: 1, + TenantId: "x2", + } + + x := index.NewIndex(util.Logger, 
index.NewStore(), c) + require.NoError(t, db.Update(x.Init)) + require.NoError(t, db.View(x.Restore)) + + require.NoError(t, db.Update(func(tx *bbolt.Tx) error { + return x.InsertBlock(tx, md1) + })) + require.NoError(t, db.Update(func(tx *bbolt.Tx) error { + return x.InsertBlock(tx, md2) + })) + + require.NoError(t, db.Update(func(tx *bbolt.Tx) error { + return x.ReplaceBlocks(tx, &metastorev1.CompactedBlocks{ + NewBlocks: []*metastorev1.BlockMeta{md3, md4}, + SourceBlocks: &metastorev1.BlockList{ + Tenant: md1.TenantId, + Shard: md1.Shard, + Blocks: []string{md1.Id, md2.Id}, + }, + }) + })) + + x = index.NewIndex(util.Logger, index.NewStore(), c) + require.NoError(t, db.Update(x.Init)) + require.NoError(t, db.View(x.Restore)) + require.NoError(t, db.View(func(tx *bbolt.Tx) error { + require.Nil(t, x.FindBlock(tx, md1.Shard, md1.TenantId, md1.Id)) + require.Nil(t, x.FindBlock(tx, md2.Shard, md2.TenantId, md2.Id)) + require.NotNil(t, x.FindBlock(tx, md3.Shard, md3.TenantId, md3.Id)) + require.NotNil(t, x.FindBlock(tx, md4.Shard, md4.TenantId, md4.Id)) + return nil + })) +} diff --git a/pkg/experiment/metastore/index/partition_key.go b/pkg/experiment/metastore/index/partition_key.go deleted file mode 100644 index 2b0c00b22c..0000000000 --- a/pkg/experiment/metastore/index/partition_key.go +++ /dev/null @@ -1,37 +0,0 @@ -package index - -import ( - "fmt" - "strings" - "time" - - "github.com/prometheus/common/model" -) - -const ( - dayLayout = "20060102" - hourLayout = "20060102T15" -) - -func getTimeLayout(d model.Duration) string { - if time.Duration(d) >= 24*time.Hour { - return dayLayout - } else { - return hourLayout - } -} - -type PartitionKey string - -func (k PartitionKey) Parse() (t time.Time, d time.Duration, err error) { - parts := strings.Split(string(k), ".") - if len(parts) != 2 { - return time.Time{}, 0, fmt.Errorf("invalid partition key: %s", k) - } - mDur, err := model.ParseDuration(parts[1]) - if err != nil { - return time.Time{}, 0, fmt.Errorf("invalid duration in partition key: %s", k) - } - t, err = time.Parse(getTimeLayout(mDur), parts[0]) - return t, time.Duration(mDur), err -} diff --git a/pkg/experiment/metastore/index/partition_meta.go b/pkg/experiment/metastore/index/partition_meta.go index 08f6cbfb48..d080ca0039 100644 --- a/pkg/experiment/metastore/index/partition_meta.go +++ b/pkg/experiment/metastore/index/partition_meta.go @@ -2,10 +2,12 @@ package index import ( "time" + + "github.com/grafana/pyroscope/pkg/experiment/metastore/index/store" ) type PartitionMeta struct { - Key PartitionKey + Key store.PartitionKey Ts time.Time Duration time.Duration Tenants []string diff --git a/pkg/experiment/metastore/index/store.go b/pkg/experiment/metastore/index/store.go deleted file mode 100644 index 8b4d8ebc7b..0000000000 --- a/pkg/experiment/metastore/index/store.go +++ /dev/null @@ -1,159 +0,0 @@ -package index - -import ( - "encoding/binary" - "fmt" - "slices" - - "github.com/go-kit/log" - "github.com/pkg/errors" - "go.etcd.io/bbolt" - - metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" - "github.com/grafana/pyroscope/pkg/experiment/metastore/storeutils" -) - -type indexStore struct { - logger log.Logger -} - -func NewIndexStore(logger log.Logger) Store { - return &indexStore{ - logger: logger, - } -} - -const ( - partitionBucketName = "partition" - emptyTenantBucketName = "-" -) - -var partitionBucketNameBytes = []byte(partitionBucketName) -var emptyTenantBucketNameBytes = []byte(emptyTenantBucketName) - -func getPartitionBucket(tx *bbolt.Tx) 
(*bbolt.Bucket, error) { - bkt := tx.Bucket(partitionBucketNameBytes) - return bkt, nil -} - -func (m *indexStore) ListPartitions(tx *bbolt.Tx) []PartitionKey { - partitionKeys := make([]PartitionKey, 0) - bkt, err := getPartitionBucket(tx) - if err != nil { - return nil - } - _ = bkt.ForEachBucket(func(name []byte) error { - partitionKeys = append(partitionKeys, PartitionKey(name)) - return nil - }) - return partitionKeys -} - -func (m *indexStore) ListShards(tx *bbolt.Tx, key PartitionKey) []uint32 { - shards := make([]uint32, 0) - bkt, err := getPartitionBucket(tx) - if err != nil { - return nil - } - partBkt := bkt.Bucket([]byte(key)) - if partBkt == nil { - return nil - } - _ = partBkt.ForEachBucket(func(name []byte) error { - shards = append(shards, binary.BigEndian.Uint32(name)) - return nil - }) - return shards -} - -func (m *indexStore) ListTenants(tx *bbolt.Tx, key PartitionKey, shard uint32) []string { - tenants := make([]string, 0) - bkt, err := getPartitionBucket(tx) - if err != nil { - return nil - } - partBkt := bkt.Bucket([]byte(key)) - if partBkt == nil { - return nil - } - shardBktName := make([]byte, 4) - binary.BigEndian.PutUint32(shardBktName, shard) - shardBkt := partBkt.Bucket(shardBktName) - if shardBkt == nil { - return nil - } - _ = shardBkt.ForEachBucket(func(name []byte) error { - if slices.Equal(name, emptyTenantBucketNameBytes) { - tenants = append(tenants, "") - } else { - tenants = append(tenants, string(name)) - } - return nil - }) - return tenants -} - -func (m *indexStore) ListBlocks(tx *bbolt.Tx, key PartitionKey, shard uint32, tenant string) []*metastorev1.BlockMeta { - blocks := make([]*metastorev1.BlockMeta, 0) - bkt, err := getPartitionBucket(tx) - if err != nil { - return nil - } - partBkt := bkt.Bucket([]byte(key)) - if partBkt == nil { - return nil - } - shardBktName := make([]byte, 4) - binary.BigEndian.PutUint32(shardBktName, shard) - shardBkt := partBkt.Bucket(shardBktName) - if shardBkt == nil { - return nil - } - tenantBktName := []byte(tenant) - if len(tenantBktName) == 0 { - tenantBktName = emptyTenantBucketNameBytes - } - tenantBkt := shardBkt.Bucket(tenantBktName) - if tenantBkt == nil { - return nil - } - _ = tenantBkt.ForEach(func(k, v []byte) error { - var md metastorev1.BlockMeta - if err := md.UnmarshalVT(v); err != nil { - panic(fmt.Sprintf("failed to unmarshal block %q: %v", string(k), err)) - } - blocks = append(blocks, &md) - return nil - }) - return blocks -} - -func UpdateBlockMetadataBucket(tx *bbolt.Tx, partKey PartitionKey, shard uint32, tenant string, fn func(*bbolt.Bucket) error) error { - bkt, err := getPartitionBucket(tx) - if err != nil { - return errors.Wrap(err, "root partition bucket missing") - } - - partBkt, err := storeutils.GetOrCreateSubBucket(bkt, []byte(partKey)) - if err != nil { - return errors.Wrapf(err, "error creating partition bucket for %s", partKey) - } - - shardBktName := make([]byte, 4) - binary.BigEndian.PutUint32(shardBktName, shard) - shardBkt, err := storeutils.GetOrCreateSubBucket(partBkt, shardBktName) - if err != nil { - return errors.Wrapf(err, "error creating shard bucket for partiton %s and shard %d", partKey, shard) - } - - tenantBktName := []byte(tenant) - if len(tenantBktName) == 0 { - tenantBktName = emptyTenantBucketNameBytes - } - tenantBkt, err := storeutils.GetOrCreateSubBucket(shardBkt, tenantBktName) - if err != nil { - return errors.Wrapf(err, "error creating tenant bucket for partition %s, shard %d and tenant %s", partKey, shard, tenant) - } - - return fn(tenantBkt) -} diff 
--git a/pkg/experiment/metastore/index/store/index_store.go b/pkg/experiment/metastore/index/store/index_store.go new file mode 100644 index 0000000000..93c03da701 --- /dev/null +++ b/pkg/experiment/metastore/index/store/index_store.go @@ -0,0 +1,181 @@ +package store + +import ( + "bytes" + "encoding/binary" + "fmt" + + "go.etcd.io/bbolt" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" +) + +const ( + partitionBucketName = "partition" + emptyTenantBucketName = "-" +) + +var ( + partitionBucketNameBytes = []byte(partitionBucketName) + emptyTenantBucketNameBytes = []byte(emptyTenantBucketName) +) + +type IndexStore struct{} + +func NewIndexStore() *IndexStore { + return &IndexStore{} +} + +func getPartitionBucket(tx *bbolt.Tx) *bbolt.Bucket { + return tx.Bucket(partitionBucketNameBytes) +} + +func (m *IndexStore) CreateBuckets(tx *bbolt.Tx) error { + _, err := tx.CreateBucketIfNotExists(partitionBucketNameBytes) + return err +} + +func (m *IndexStore) StoreBlock(tx *bbolt.Tx, pk PartitionKey, b *metastorev1.BlockMeta) error { + key := []byte(b.Id) + value, err := b.MarshalVT() + if err != nil { + return err + } + partBkt, err := getOrCreateSubBucket(getPartitionBucket(tx), []byte(pk)) + if err != nil { + return fmt.Errorf("error creating partition bucket for %s: %w", pk, err) + } + + shardBktName := make([]byte, 4) + binary.BigEndian.PutUint32(shardBktName, b.Shard) + shardBkt, err := getOrCreateSubBucket(partBkt, shardBktName) + if err != nil { + return fmt.Errorf("error creating shard bucket for partition %s and shard %d: %w", pk, b.Shard, err) + } + + tenantBktName := []byte(b.TenantId) + if len(tenantBktName) == 0 { + tenantBktName = emptyTenantBucketNameBytes + } + tenantBkt, err := getOrCreateSubBucket(shardBkt, tenantBktName) + if err != nil { + return fmt.Errorf("error creating tenant bucket for partition %s, shard %d and tenant %s: %w", pk, b.Shard, b.TenantId, err) + } + + return tenantBkt.Put(key, value) +} + +func (m *IndexStore) DeleteBlockList(tx *bbolt.Tx, pk PartitionKey, list *metastorev1.BlockList) error { + partitions := getPartitionBucket(tx) + if partitions == nil { + return nil + } + partition := partitions.Bucket([]byte(pk)) + if partition == nil { + return nil + } + shardBktName := make([]byte, 4) + binary.BigEndian.PutUint32(shardBktName, list.Shard) + shards := partition.Bucket(shardBktName) + if shards == nil { + return nil + } + tenantBktName := []byte(list.Tenant) + if len(tenantBktName) == 0 { + tenantBktName = emptyTenantBucketNameBytes + } + tenant := shards.Bucket(tenantBktName) + if tenant == nil { + return nil + } + for _, b := range list.Blocks { + if err := tenant.Delete([]byte(b)); err != nil { + return err + } + } + return nil + } + +func (m *IndexStore) ListPartitions(tx *bbolt.Tx) []PartitionKey { + partitionKeys := make([]PartitionKey, 0) + _ = getPartitionBucket(tx).ForEachBucket(func(name []byte) error { + partitionKeys = append(partitionKeys, PartitionKey(name)) + return nil + }) + return partitionKeys +} + +func (m *IndexStore) ListShards(tx *bbolt.Tx, key PartitionKey) []uint32 { + shards := make([]uint32, 0) + partBkt := getPartitionBucket(tx).Bucket([]byte(key)) + if partBkt == nil { + return nil + } + _ = partBkt.ForEachBucket(func(name []byte) error { + shards = append(shards, binary.BigEndian.Uint32(name)) + return nil + }) + return shards +} + +func (m *IndexStore) ListTenants(tx *bbolt.Tx, key PartitionKey, shard uint32) []string { + tenants := make([]string, 0) + partBkt :=
getPartitionBucket(tx).Bucket([]byte(key)) + if partBkt == nil { + return nil + } + shardBktName := make([]byte, 4) + binary.BigEndian.PutUint32(shardBktName, shard) + shardBkt := partBkt.Bucket(shardBktName) + if shardBkt == nil { + return nil + } + _ = shardBkt.ForEachBucket(func(name []byte) error { + if bytes.Equal(name, emptyTenantBucketNameBytes) { + tenants = append(tenants, "") + } else { + tenants = append(tenants, string(name)) + } + return nil + }) + return tenants +} + +func (m *IndexStore) ListBlocks(tx *bbolt.Tx, key PartitionKey, shard uint32, tenant string) []*metastorev1.BlockMeta { + blocks := make([]*metastorev1.BlockMeta, 0) + partBkt := getPartitionBucket(tx).Bucket([]byte(key)) + if partBkt == nil { + return nil + } + shardBktName := make([]byte, 4) + binary.BigEndian.PutUint32(shardBktName, shard) + shardBkt := partBkt.Bucket(shardBktName) + if shardBkt == nil { + return nil + } + tenantBktName := []byte(tenant) + if len(tenantBktName) == 0 { + tenantBktName = emptyTenantBucketNameBytes + } + tenantBkt := shardBkt.Bucket(tenantBktName) + if tenantBkt == nil { + return nil + } + _ = tenantBkt.ForEach(func(k, v []byte) error { + var md metastorev1.BlockMeta + if err := md.UnmarshalVT(v); err != nil { + panic(fmt.Sprintf("failed to unmarshal block %q: %v", string(k), err)) + } + blocks = append(blocks, &md) + return nil + }) + return blocks +} + +func getOrCreateSubBucket(parent *bbolt.Bucket, name []byte) (*bbolt.Bucket, error) { + bucket := parent.Bucket(name) + if bucket == nil { + return parent.CreateBucket(name) + } + return bucket, nil +} diff --git a/pkg/experiment/metastore/index/store/partition_key.go b/pkg/experiment/metastore/index/store/partition_key.go new file mode 100644 index 0000000000..7f23f7a26a --- /dev/null +++ b/pkg/experiment/metastore/index/store/partition_key.go @@ -0,0 +1,65 @@ +package store + +import ( + "fmt" + "strings" + "time" + + "github.com/oklog/ulid" + "github.com/prometheus/common/model" +) + +const ( + dayLayout = "20060102" + hourLayout = "20060102T15" +) + +func getTimeLayout(d model.Duration) string { + if time.Duration(d) >= 24*time.Hour { + return dayLayout + } else { + return hourLayout + } +} + +type PartitionKey string + +// CreatePartitionKey creates a partition key for a block. It is meant to be used for newly inserted blocks, as it relies +// on the index's currently configured partition duration to create the key. +// +// Note: Using this for existing blocks following a partition duration change can produce the wrong key. Callers should +// verify that the returned partition actually contains the block. 
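// A minimal sketch of the verification suggested above, assuming a
// hypothetical helper (not part of this change): parse the key back into
// its start time and duration and check that the block's ULID timestamp
// falls inside that window.
func blockInPartition(k PartitionKey, blockId string) bool {
	start, d, err := k.Parse()
	if err != nil {
		return false
	}
	// The block belongs to the partition if its ULID timestamp lies in [start, start+d).
	ts := ulid.Time(ulid.MustParse(blockId).Time()).UTC()
	return !ts.Before(start) && ts.Before(start.Add(d))
}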
+func CreatePartitionKey(blockId string, dur time.Duration) PartitionKey { + t := ulid.Time(ulid.MustParse(blockId).Time()).UTC() + + var b strings.Builder + b.Grow(16) + + year, month, day := t.Date() + b.WriteString(fmt.Sprintf("%04d%02d%02d", year, month, day)) + + partitionDuration := dur + if partitionDuration < 24*time.Hour { + hour := (t.Hour() / int(partitionDuration.Hours())) * int(partitionDuration.Hours()) + b.WriteString(fmt.Sprintf("T%02d", hour)) + } + + mDuration := model.Duration(partitionDuration) + b.WriteString(".") + b.WriteString(mDuration.String()) + + return PartitionKey(b.String()) +} + +func (k PartitionKey) Parse() (t time.Time, d time.Duration, err error) { + parts := strings.Split(string(k), ".") + if len(parts) != 2 { + return time.Time{}, 0, fmt.Errorf("invalid partition key: %s", k) + } + mDur, err := model.ParseDuration(parts[1]) + if err != nil { + return time.Time{}, 0, fmt.Errorf("invalid duration in partition key: %s", k) + } + t, err = time.Parse(getTimeLayout(mDur), parts[0]) + return t, time.Duration(mDur), err +} diff --git a/pkg/experiment/metastore/index/store/partition_key_test.go b/pkg/experiment/metastore/index/store/partition_key_test.go new file mode 100644 index 0000000000..de9f2462ae --- /dev/null +++ b/pkg/experiment/metastore/index/store/partition_key_test.go @@ -0,0 +1,67 @@ +package store + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/grafana/pyroscope/pkg/test" +) + +func TestIndex_GetPartitionKey(t *testing.T) { + tests := []struct { + name string + duration time.Duration + blockId string + want PartitionKey + }{ + { + name: "1d", + duration: test.Duration("1d"), + blockId: test.ULID("2024-07-15T16:13:43.245Z"), + want: PartitionKey("20240715.1d"), + }, + { + name: "1h at start of the window", + duration: test.Duration("1h"), + blockId: test.ULID("2024-07-15T16:00:00.000Z"), + want: PartitionKey("20240715T16.1h"), + }, + { + name: "1h in the middle of the window", + duration: test.Duration("1h"), + blockId: test.ULID("2024-07-15T16:13:43.245Z"), + want: PartitionKey("20240715T16.1h"), + }, + { + name: "1h at the end of the window", + duration: test.Duration("1h"), + blockId: test.ULID("2024-07-15T16:59:59.999Z"), + want: PartitionKey("20240715T16.1h"), + }, + { + name: "6h duration at midnight", + duration: test.Duration("6h"), + blockId: test.ULID("2024-07-15T00:00:00.000Z"), + want: PartitionKey("20240715T00.6h"), + }, + { + name: "6h at the middle of a window", + duration: test.Duration("6h"), + blockId: test.ULID("2024-07-15T15:13:43.245Z"), + want: PartitionKey("20240715T12.6h"), + }, + { + name: "6h at the end of the window", + duration: test.Duration("6h"), + blockId: test.ULID("2024-07-15T23:59:59.999Z"), + want: PartitionKey("20240715T18.6h"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, CreatePartitionKey(tt.blockId, tt.duration), "CreatePartitionKey(%v)", tt.blockId) + }) + } +} diff --git a/pkg/experiment/metastore/index_raft_handler.go b/pkg/experiment/metastore/index_raft_handler.go index 751aba12b9..aa76d9dd69 100644 --- a/pkg/experiment/metastore/index_raft_handler.go +++ b/pkg/experiment/metastore/index_raft_handler.go @@ -4,81 +4,62 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/hashicorp/raft" + "github.com/pkg/errors" "go.etcd.io/bbolt" metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" "github.com/grafana/pyroscope/pkg/experiment/metastore/index" ) -type 
IndexInserter interface { - FindBlock(tx *bbolt.Tx, shard uint32, tenant string, block string) *metastorev1.BlockMeta - InsertBlock(*bbolt.Tx, *metastorev1.BlockMeta) - CreatePartitionKey(string) index.PartitionKey +type Index interface { + InsertBlock(*bbolt.Tx, *metastorev1.BlockMeta) error } -type DeletionMarkChecker interface { - IsMarked(string) bool +type Tombstones interface { + Exists(*metastorev1.BlockMeta) bool } type Compactor interface { - CompactBlock(*bbolt.Tx, *raft.Log, *metastorev1.BlockMeta) error + Compact(*bbolt.Tx, *raft.Log, *metastorev1.BlockMeta) error } type IndexCommandHandler struct { - logger log.Logger - index IndexInserter - marks DeletionMarkChecker - compactor Compactor + logger log.Logger + index Index + tombstones Tombstones + compactor Compactor } func NewIndexCommandHandler( logger log.Logger, - index IndexInserter, - marks DeletionMarkChecker, + index Index, + tombstones Tombstones, compactor Compactor, ) *IndexCommandHandler { return &IndexCommandHandler{ - logger: logger, - index: index, - marks: marks, - compactor: compactor, + logger: logger, + index: index, + tombstones: tombstones, + compactor: compactor, } } -func (m *IndexCommandHandler) AddBlock(tx *bbolt.Tx, cmd *raft.Log, request *metastorev1.AddBlockRequest) (*metastorev1.AddBlockResponse, error) { - if m.marks.IsMarked(request.Block.Id) { - _ = level.Warn(m.logger).Log("msg", "block already added and compacted", "block_id", request.Block.Id) - return &metastorev1.AddBlockResponse{}, nil +func (m *IndexCommandHandler) AddBlock(tx *bbolt.Tx, cmd *raft.Log, req *metastorev1.AddBlockRequest) (*metastorev1.AddBlockResponse, error) { + if m.tombstones.Exists(req.Block) { + level.Warn(m.logger).Log("msg", "block already added and compacted", "block_id", req.Block.Id) + return new(metastorev1.AddBlockResponse), nil } - if m.index.FindBlock(tx, request.Block.Shard, request.Block.TenantId, request.Block.Id) != nil { - _ = level.Warn(m.logger).Log("msg", "block already added", "block_id", request.Block.Id) - return &metastorev1.AddBlockResponse{}, nil - } - - partKey := m.index.CreatePartitionKey(request.Block.Id) - err := persistBlock(tx, partKey, request.Block) - if err == nil { - err = m.compactor.CompactBlock(tx, cmd, request.Block) + if err := m.index.InsertBlock(tx, req.Block); err != nil { + if errors.Is(err, index.ErrBlockExists) { + level.Warn(m.logger).Log("msg", "block already added", "block_id", req.Block.Id) + return new(metastorev1.AddBlockResponse), nil + } + level.Error(m.logger).Log("msg", "failed to add block to index", "block_id", req.Block.Id) + return nil, err } - if err != nil { - _ = level.Error(m.logger).Log( - "msg", "failed to add block", - "block", request.Block.Id, - "err", err, - ) + if err := m.compactor.Compact(tx, cmd, req.Block); err != nil { + level.Error(m.logger).Log("msg", "failed to add block to compaction", "block", req.Block.Id, "err", err) return nil, err } - m.index.InsertBlock(tx, request.Block) return &metastorev1.AddBlockResponse{}, nil } - -func persistBlock(tx *bbolt.Tx, partKey index.PartitionKey, block *metastorev1.BlockMeta) error { - key := []byte(block.Id) - value, err := block.MarshalVT() - if err != nil { - return err - } - return index.UpdateBlockMetadataBucket(tx, partKey, block.Shard, block.TenantId, func(bucket *bbolt.Bucket) error { - return bucket.Put(key, value) - }) -} diff --git a/pkg/experiment/metastore/index_service.go b/pkg/experiment/metastore/index_service.go index ccb1d83b92..6d42b66b0c 100644 --- 
a/pkg/experiment/metastore/index_service.go +++ b/pkg/experiment/metastore/index_service.go @@ -6,11 +6,13 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/oklog/ulid" + "go.etcd.io/bbolt" metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log" placement "github.com/grafana/pyroscope/pkg/experiment/distributor/placement/adaptive_placement" "github.com/grafana/pyroscope/pkg/experiment/metastore/fsm" + "github.com/grafana/pyroscope/pkg/experiment/metastore/raftnode" "github.com/grafana/pyroscope/pkg/iter" ) @@ -20,22 +22,28 @@ type PlacementStats interface { func NewIndexService( logger log.Logger, - raftLog Raft, + raft Raft, + state State, + index IndexQuerier, stats PlacementStats, ) *IndexService { return &IndexService{ - logger: logger, - raftLog: raftLog, - stats: stats, + logger: logger, + raft: raft, + state: state, + index: index, + stats: stats, } } type IndexService struct { metastorev1.IndexServiceServer - logger log.Logger - raftLog Raft - stats PlacementStats + logger log.Logger + raft Raft + state State + index IndexQuerier + stats PlacementStats } func (svc *IndexService) AddBlock( @@ -65,12 +73,33 @@ func (svc *IndexService) addBlockMetadata( _ = level.Warn(svc.logger).Log("invalid metadata", "block_id", req.Block.Id, "err", err) return nil, err } - resp, err := svc.raftLog.Propose(fsm.RaftLogEntryType(raft_log.RaftCommand_RAFT_COMMAND_ADD_BLOCK), req) - if err != nil { + if err := proposeAddBlockMetadata(svc.raft, req.Block); err != nil { _ = level.Error(svc.logger).Log("msg", "failed to add block", "block_id", req.Block.Id, "err", err) return nil, err } - return resp.(*metastorev1.AddBlockResponse), err + return new(metastorev1.AddBlockResponse), nil +} + +func proposeAddBlockMetadata(raft Raft, md *metastorev1.BlockMeta) error { + _, err := raft.Propose( + fsm.RaftLogEntryType(raft_log.RaftCommand_RAFT_COMMAND_ADD_BLOCK_METADATA), + &raft_log.AddBlockMetadataRequest{Metadata: md}, + ) + return err +} + +func (svc *IndexService) GetBlockMetadata( + ctx context.Context, + req *metastorev1.GetBlockMetadataRequest, +) (*metastorev1.GetBlockMetadataResponse, error) { + var found []*metastorev1.BlockMeta + err := svc.state.ConsistentRead(ctx, func(tx *bbolt.Tx, _ raftnode.ReadIndex) { + found = svc.index.FindBlocks(tx, req.GetBlocks()) + }) + if err != nil { + return nil, err + } + return &metastorev1.GetBlockMetadataResponse{Blocks: found}, nil } func statsFromMetadata(md *metastorev1.BlockMeta) iter.Iterator[placement.Sample] { diff --git a/pkg/experiment/metastore/markers/deletion_markers.go b/pkg/experiment/metastore/markers/deletion_markers.go deleted file mode 100644 index 64dc1921d9..0000000000 --- a/pkg/experiment/metastore/markers/deletion_markers.go +++ /dev/null @@ -1,243 +0,0 @@ -package markers - -import ( - "encoding/binary" - "flag" - "fmt" - "sync" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/client_golang/prometheus" - "go.etcd.io/bbolt" - - "github.com/grafana/pyroscope/pkg/util" -) - -type metrics struct { - markedBlocks *prometheus.CounterVec - expiredBlocks *prometheus.CounterVec -} - -func newMetrics(reg prometheus.Registerer) *metrics { - m := &metrics{ - markedBlocks: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "block_cleaner_marked_block_count", - Help: "The number of blocks marked as removed", - }, []string{"tenant", "shard"}), - expiredBlocks: 
prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "block_cleaner_expired_block_count", - Help: "The number of marked blocks that expired and were removed", - }, []string{"tenant", "shard"}), - } - if reg != nil { - util.Register(reg, - m.markedBlocks, - m.expiredBlocks, - ) - } - return m -} - -const ( - removedBlocksBucketName = "removed-blocks" -) - -var removedBlocksBucketNameBytes = []byte(removedBlocksBucketName) - -type Config struct { - CompactedBlocksCleanupInterval time.Duration `yaml:"compacted_blocks_cleanup_interval"` - CompactedBlocksCleanupDelay time.Duration `yaml:"compacted_blocks_cleanup_delay"` -} - -func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { - f.DurationVar(&cfg.CompactedBlocksCleanupDelay, prefix+"compacted-blocks-cleanup-delay", time.Minute*30, "The grace period for permanently deleting compacted blocks.") - f.DurationVar(&cfg.CompactedBlocksCleanupInterval, prefix+"compacted-blocks-cleanup-interval", time.Minute, "The interval at which block cleanup is performed.") -} - -type BlockRemovalContext struct { - Shard uint32 - Tenant string - ExpiryTs int64 -} - -type DeletionMarkers struct { - blockMarkers map[string]*BlockRemovalContext - mu sync.Mutex - - logger log.Logger - cfg *Config - metrics *metrics -} - -func NewDeletionMarkers(logger log.Logger, cfg *Config, reg prometheus.Registerer) *DeletionMarkers { - return &DeletionMarkers{ - blockMarkers: make(map[string]*BlockRemovalContext), - logger: logger, - cfg: cfg, - metrics: newMetrics(reg), - } -} - -func (m *DeletionMarkers) Init(tx *bbolt.Tx) error { - _, err := tx.CreateBucketIfNotExists(removedBlocksBucketNameBytes) - return err -} - -func (m *DeletionMarkers) Restore(tx *bbolt.Tx) error { - m.mu.Lock() - defer m.mu.Unlock() - clear(m.blockMarkers) - bkt := tx.Bucket(removedBlocksBucketNameBytes) - err := bkt.ForEachBucket(func(k []byte) error { - shardBkt := bkt.Bucket(k) - if shardBkt == nil { - return nil - } - shard := binary.BigEndian.Uint32(k) - return shardBkt.ForEach(func(k, v []byte) error { - if len(k) < 34 { - return fmt.Errorf("block key too short (expected 34 chars, was %d)", len(k)) - } - blockId := string(k[:26]) - m.blockMarkers[blockId] = &BlockRemovalContext{ - Shard: shard, - Tenant: string(k[34:]), - ExpiryTs: int64(binary.BigEndian.Uint64(k[26:34])), - } - return nil - }) - }) - if err != nil { - return err - } - - level.Info(m.logger).Log("msg", "loaded metastore block deletion markers", "marker_count", len(m.blockMarkers)) - return nil -} - -func (m *DeletionMarkers) Mark(tx *bbolt.Tx, shard uint32, tenant string, blockId string, deletedTs int64) error { - if m.IsMarked(blockId) { - return nil - } - expiryTs := deletedTs + m.cfg.CompactedBlocksCleanupDelay.Milliseconds() - bkt, err := tx.CreateBucketIfNotExists(removedBlocksBucketNameBytes) - if err != nil { - return err - } - shardBkt, err := getOrCreateSubBucket(bkt, getShardBucketName(shard)) - if err != nil { - return err - } - blockKey := getBlockKey(blockId, expiryTs, tenant) - if err = shardBkt.Put(blockKey, []byte{}); err != nil { - return err - } - if err != nil { - return err - } - m.mu.Lock() - defer m.mu.Unlock() - m.blockMarkers[blockId] = &BlockRemovalContext{ - Shard: shard, - Tenant: tenant, - ExpiryTs: expiryTs, - } - m.metrics.markedBlocks.WithLabelValues(tenant, fmt.Sprint(shard)).Inc() - return nil -} - -func (m *DeletionMarkers) IsMarked(blockId string) bool { - m.mu.Lock() - defer m.mu.Unlock() - _, ok := m.blockMarkers[blockId] - return ok -} - -func (m *DeletionMarkers) 
FindExpiredMarkers(now int64) map[string]*BlockRemovalContext { - blocks := make(map[string]*BlockRemovalContext) - m.mu.Lock() - defer m.mu.Unlock() - for b, removalContext := range m.blockMarkers { - if removalContext.ExpiryTs < now { - blocks[b] = removalContext - } - } - return blocks -} - -func (m *DeletionMarkers) Remove(tx *bbolt.Tx, markers map[string]*BlockRemovalContext) error { - m.mu.Lock() - defer m.mu.Unlock() - if len(markers) == 0 { - return nil - } - markersPerShard := make(map[uint32]map[string]*BlockRemovalContext) - for blockId, removalContext := range markers { - s, ok := markersPerShard[removalContext.Shard] - if !ok { - s = make(map[string]*BlockRemovalContext) - markersPerShard[removalContext.Shard] = s - } - s[blockId] = removalContext - } - bkt, err := getPendingBlockRemovalsBucket(tx) - if err != nil { - return err - } - for shard, shardMarkers := range markersPerShard { - shardBkt, err := getOrCreateSubBucket(bkt, getShardBucketName(shard)) - if err != nil { - return err - } - for b, m := range shardMarkers { - key := getBlockKey(b, m.ExpiryTs, m.Tenant) - err := shardBkt.Delete(key) - if err != nil { - return err - } - } - } - if err != nil { - return err - } - for b, removalContext := range markers { - delete(m.blockMarkers, b) - level.Debug(m.logger).Log( - "msg", "removed block from pending block removals", - "blockId", b, - "Shard", removalContext.Shard, - "Tenant", removalContext.Tenant, - "ExpiryTs", removalContext.ExpiryTs) - m.metrics.expiredBlocks.WithLabelValues(removalContext.Tenant, fmt.Sprint(removalContext.Shard)).Inc() - } - level.Info(m.logger).Log("msg", "finished deletion marker cleanup", "markers_removed", len(markers)) - return nil -} - -func getPendingBlockRemovalsBucket(tx *bbolt.Tx) (*bbolt.Bucket, error) { - return tx.CreateBucketIfNotExists(removedBlocksBucketNameBytes) -} - -func getOrCreateSubBucket(parent *bbolt.Bucket, name []byte) (*bbolt.Bucket, error) { - bucket := parent.Bucket(name) - if bucket == nil { - return parent.CreateBucket(name) - } - return bucket, nil -} - -func getShardBucketName(shard uint32) []byte { - shardBucketName := make([]byte, 4) - binary.BigEndian.PutUint32(shardBucketName, shard) - return shardBucketName -} - -func getBlockKey(blockId string, expiryTs int64, tenant string) []byte { - blockKey := make([]byte, 26+8+len(tenant)) - copy(blockKey[:26], blockId) - binary.BigEndian.PutUint64(blockKey[26:34], uint64(expiryTs)) - copy(blockKey[34:], tenant) - return blockKey -} diff --git a/pkg/experiment/metastore/markers/deletion_markers_test.go b/pkg/experiment/metastore/markers/deletion_markers_test.go deleted file mode 100644 index 2ef1f92128..0000000000 --- a/pkg/experiment/metastore/markers/deletion_markers_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package markers - -import ( - "crypto/rand" - "path/filepath" - "testing" - "time" - - "github.com/oklog/ulid" - "github.com/stretchr/testify/require" - "go.etcd.io/bbolt" - - "github.com/grafana/pyroscope/pkg/util" -) - -func Test_AddAndCheck(t *testing.T) { - db := createDb(t) - markers := NewDeletionMarkers( - util.Logger, - &Config{CompactedBlocksCleanupDelay: time.Second * 2}, - nil, - ) - - tx, _ := db.Begin(true) - blockId := ulid.MustNew(ulid.Now(), rand.Reader).String() - err := markers.Mark(tx, 0, "Tenant", blockId, 1000) - require.NoError(t, err) - - require.True(t, markers.IsMarked(blockId)) -} - -func createDb(t *testing.T) *bbolt.DB { - opts := *bbolt.DefaultOptions - opts.ReadOnly = false - opts.NoSync = true - db, err := 
bbolt.Open(filepath.Join(t.TempDir(), "db.boltdb"), 0644, &opts) - require.NoError(t, err) - return db -} diff --git a/pkg/experiment/metastore/metastore.go b/pkg/experiment/metastore/metastore.go index cd307f1ed6..0b5c3bffce 100644 --- a/pkg/experiment/metastore/metastore.go +++ b/pkg/experiment/metastore/metastore.go @@ -19,12 +19,14 @@ import ( metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log" placement "github.com/grafana/pyroscope/pkg/experiment/distributor/placement/adaptive_placement" + "github.com/grafana/pyroscope/pkg/experiment/metastore/compaction/compactor" + "github.com/grafana/pyroscope/pkg/experiment/metastore/compaction/scheduler" "github.com/grafana/pyroscope/pkg/experiment/metastore/dlq" "github.com/grafana/pyroscope/pkg/experiment/metastore/fsm" "github.com/grafana/pyroscope/pkg/experiment/metastore/index" - "github.com/grafana/pyroscope/pkg/experiment/metastore/markers" raft "github.com/grafana/pyroscope/pkg/experiment/metastore/raftnode" "github.com/grafana/pyroscope/pkg/experiment/metastore/raftnode/raftnodepb" + "github.com/grafana/pyroscope/pkg/experiment/metastore/tombstones" "github.com/grafana/pyroscope/pkg/util/health" ) @@ -32,12 +34,12 @@ type Config struct { Address string `yaml:"address"` GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config" doc:"description=Configures the gRPC client used to communicate with the metastore."` DataDir string `yaml:"data_dir"` - Raft raft.Config `yaml:"raft"` - Compaction CompactionConfig `yaml:"compaction_config" category:"advanced"` MinReadyDuration time.Duration `yaml:"min_ready_duration" category:"advanced"` - DLQRecovery dlq.RecoveryConfig `yaml:"dlq_recovery" category:"advanced"` - Index index.Config `yaml:"index_config" category:"advanced"` - BlockCleaner markers.Config `yaml:"block_cleaner_config" category:"advanced"` + Raft raft.Config `yaml:"raft"` + Index index.Config `yaml:",inline" category:"advanced"` + DLQRecovery dlq.RecoveryConfig `yaml:",inline" category:"advanced"` + Compactor compactor.Config `yaml:",inline" category:"advanced"` + Scheduler scheduler.Config `yaml:",inline" category:"advanced"` } func (cfg *Config) RegisterFlags(f *flag.FlagSet) { @@ -45,12 +47,12 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.StringVar(&cfg.Address, prefix+"address", "localhost:9095", "") f.StringVar(&cfg.DataDir, prefix+"data-dir", "./data-metastore/data", "") f.DurationVar(&cfg.MinReadyDuration, prefix+"min-ready-duration", 15*time.Second, "Minimum duration to wait after the internal readiness checks have passed but before succeeding the readiness endpoint. This is used to slowdown deployment controllers (eg. Kubernetes) after an instance is ready and before they proceed with a rolling update, to give the rest of the cluster instances enough time to receive some (DNS?) 
updates.") - cfg.Raft.RegisterFlagsWithPrefix(prefix+"raft.", f) cfg.GRPCClientConfig.RegisterFlagsWithPrefix(prefix+"grpc-client-config", f) - cfg.Compaction.RegisterFlagsWithPrefix(prefix+"compaction.", f) - cfg.Index.RegisterFlagsWithPrefix(prefix+"index.", f) - cfg.BlockCleaner.RegisterFlagsWithPrefix(prefix+"block-cleaner.", f) - cfg.DLQRecovery.RegisterFlagsWithPrefix(prefix+"dlq-recovery.", f) + cfg.Raft.RegisterFlagsWithPrefix(prefix+"raft.", f) + cfg.Compactor.RegisterFlagsWithPrefix(prefix, f) + cfg.Scheduler.RegisterFlagsWithPrefix(prefix, f) + cfg.Index.RegisterFlagsWithPrefix(prefix, f) + cfg.DLQRecovery.RegisterFlagsWithPrefix(prefix, f) } func (cfg *Config) Validate() error { @@ -74,23 +76,21 @@ type Metastore struct { raft *raft.Node fsm *fsm.FSM - followerRead *raft.StateReader[*bbolt.Tx] - bucket objstore.Bucket placement *placement.Manager dlqRecovery *dlq.Recovery index *index.Index - markers *markers.DeletionMarkers - indexService *IndexService indexHandler *IndexCommandHandler + indexService *IndexService - compactionService *CompactionService + tombstones *tombstones.Tombstones + compactor *compactor.Compactor + scheduler *scheduler.Scheduler compactionHandler *CompactionCommandHandler + compactionService *CompactionService - cleanerService *CleanerService - cleanerHandler *CleanerCommandHandler - + followerRead *raft.StateReader[*bbolt.Tx] tenantService *TenantService metadataService *MetadataQueryService @@ -110,7 +110,7 @@ func New( m := &Metastore{ config: config, logger: logger, - reg: prometheus.WrapRegistererWithPrefix("pyroscope_metastore_", reg), + reg: reg, health: healthService, bucket: bucket, placement: placementMgr, @@ -123,38 +123,33 @@ func New( } // Initialization of the base components. - indexStore := index.NewIndexStore(m.logger) - m.index = index.NewIndex(m.logger, indexStore, &config.Index) - m.markers = markers.NewDeletionMarkers(m.logger, &config.BlockCleaner, m.reg) + m.index = index.NewIndex(m.logger, index.NewStore(), &config.Index) + m.tombstones = tombstones.NewTombstones(tombstones.NewStore()) + m.compactor = compactor.NewCompactor(config.Compactor, compactor.NewStore(), m.tombstones, m.reg) + m.scheduler = scheduler.NewScheduler(config.Scheduler, scheduler.NewStore(), m.reg) // FSM handlers that utilize the components. 
- m.compactionHandler = NewCompactionCommandHandler(m.logger, m.config.Compaction, m.index, m.markers, m.reg) - m.indexHandler = NewIndexCommandHandler(m.logger, m.index, m.markers, m.compactionHandler) - m.cleanerHandler = NewCleanerCommandHandler(m.logger, m.bucket, m.markers, m.reg) - - m.fsm.RegisterRestorer(m.index) - m.fsm.RegisterRestorer(m.compactionHandler) - m.fsm.RegisterRestorer(m.markers) - + m.indexHandler = NewIndexCommandHandler(m.logger, m.index, m.tombstones, m.compactor) fsm.RegisterRaftCommandHandler(m.fsm, - fsm.RaftLogEntryType(raft_log.RaftCommand_RAFT_COMMAND_POLL_COMPACTION_JOBS), - m.compactionHandler.PollCompactionJobs) - - fsm.RegisterRaftCommandHandler(m.fsm, - fsm.RaftLogEntryType(raft_log.RaftCommand_RAFT_COMMAND_ADD_BLOCK), + fsm.RaftLogEntryType(raft_log.RaftCommand_RAFT_COMMAND_ADD_BLOCK_METADATA), m.indexHandler.AddBlock) - m.fsm.RegisterRestorer(m.markers) + m.compactionHandler = NewCompactionCommandHandler(m.logger, m.index, m.compactor, m.compactor, m.scheduler, m.tombstones) fsm.RegisterRaftCommandHandler(m.fsm, - fsm.RaftLogEntryType(raft_log.RaftCommand_RAFT_COMMAND_CLEAN_BLOCKS), - m.cleanerHandler.CleanBlocks) + fsm.RaftLogEntryType(raft_log.RaftCommand_RAFT_COMMAND_GET_COMPACTION_PLAN_UPDATE), + m.compactionHandler.GetCompactionPlanUpdate) + fsm.RegisterRaftCommandHandler(m.fsm, + fsm.RaftLogEntryType(raft_log.RaftCommand_RAFT_COMMAND_UPDATE_COMPACTION_PLAN), + m.compactionHandler.UpdateCompactionPlan) - if err = m.fsm.Init(); err != nil { - return nil, fmt.Errorf("failed to initialize internal state: %w", err) - } + m.fsm.RegisterRestorer(m.tombstones) + m.fsm.RegisterRestorer(m.compactor) + m.fsm.RegisterRestorer(m.scheduler) + m.fsm.RegisterRestorer(m.index) - if m.raft, err = raft.NewNode(m.logger, m.config.Raft, m.reg, m.fsm); err != nil { - return nil, fmt.Errorf("failed to initialize raft: %w", err) + // We are ready to start raft as our FSM is fully configured. + if err = m.buildRaftNode(); err != nil { + return nil, err } // Create the read-only interface to the state. @@ -165,9 +160,8 @@ func New( // Services should be registered after FSM and Raft have been initialized. // Services provide an interface to interact with the metastore. - m.indexService = NewIndexService(m.logger, m.raft, m.placement) m.compactionService = NewCompactionService(m.logger, m.raft) - m.cleanerService = NewCleanerService(m.logger, m.config.BlockCleaner, m.raft, m.cleanerHandler) + m.indexService = NewIndexService(m.logger, m.raft, m.followerRead, m.index, m.placement) m.tenantService = NewTenantService(m.logger, m.followerRead, m.index) m.metadataService = NewMetadataQueryService(m.logger, m.followerRead, m.index) m.dlqRecovery = dlq.NewRecovery(logger, config.DLQRecovery, m.indexService, bucket) @@ -177,12 +171,52 @@ func New( // service is starting, so it should be able to handle conflicts. m.raft.RunOnLeader(m.dlqRecovery) m.raft.RunOnLeader(m.placement) - m.raft.RunOnLeader(m.cleanerService) m.service = services.NewBasicService(m.starting, m.running, m.stopping) return m, nil } +func (m *Metastore) buildRaftNode() (err error) { + // Raft is configured to always restore the state from the latest snapshot + // (via FSM.Restore), if it is present. Otherwise, when no snapshots + // available, the state must be initialized explicitly via FSM.Init before + // we call raft.Init, which starts applying the raft log. 
+ if m.raft, err = raft.NewNode(m.logger, m.config.Raft, m.reg, m.fsm); err != nil { + return fmt.Errorf("failed to create raft node: %w", err) + } + + // Newly created raft node is not yet initialized and does not alter our + // FSM in any way. However, it gives us access to the snapshot store, and + // we can check whether we need to initialize the state (expensive), or we + // can defer to raft snapshots. This is an optimization: we want to avoid + // restoring the state twice: once at Init, and then at Restore. + snapshots, err := m.raft.ListSnapshots() + if err != nil { + level.Error(m.logger).Log("msg", "failed to list snapshots", "err", err) + // We continue trying; in the worst case we will initialize the state + // and then restore a snapshot received from the leader. + } + + if len(snapshots) == 0 { + level.Info(m.logger).Log("msg", "no state snapshots found") + // FSM won't be restored by raft, so we need to initialize it manually. + // Otherwise, raft will restore the state from a snapshot using + // fsm.Restore, which will initialize the state as well. + if err = m.fsm.Init(); err != nil { + level.Error(m.logger).Log("msg", "failed to initialize state", "err", err) + return err + } + } else { + level.Info(m.logger).Log("msg", "skipping state initialization as snapshots found") + } + + if err = m.raft.Init(); err != nil { + return fmt.Errorf("failed to initialize raft: %w", err) + } + + return nil +} + func (m *Metastore) Register(server *grpc.Server) { metastorev1.RegisterIndexServiceServer(server, m.indexService) metastorev1.RegisterCompactionServiceServer(server, m.compactionService) @@ -196,9 +230,6 @@ func (m *Metastore) Service() services.Service { return m.service } func (m *Metastore) starting(context.Context) error { return nil } func (m *Metastore) stopping(_ error) error { - m.cleanerService.Stop() - m.dlqRecovery.Stop() - // We let clients observe the leadership transfer: it's their // responsibility to connect to the new leader. We only need to // make sure that any error returned to clients includes details diff --git a/pkg/experiment/metastore/metastore_raft.go b/pkg/experiment/metastore/metastore_raft.go index 6eb983bfea..cdb5a6c67b 100644 --- a/pkg/experiment/metastore/metastore_raft.go +++ b/pkg/experiment/metastore/metastore_raft.go @@ -21,7 +21,7 @@ type Raft interface { // State represents a consistent read-only view of the metastore. // The write interface is provided through the FSM raft command handlers. type State interface { - ConsistentRead(context.Context, func(*bbolt.Tx)) error + ConsistentRead(context.Context, func(*bbolt.Tx, raftnode.ReadIndex)) error } // newFollowerReader creates a new follower reader – implementation of the @@ -37,7 +37,7 @@ func (m *Metastore) newFollowerReader( // raft node to implement Leader Read pattern. 
&leaderNode{client: client, timeout: m.config.Raft.ApplyTimeout}, &localNode{node: node, fsm: fsm}, - m.config.Raft.AppliedIndexCheckInterval, + m.config.Raft.LogIndexCheckInterval, m.config.Raft.ReadIndexMaxDistance, ) } @@ -50,14 +50,16 @@ type leaderNode struct { timeout time.Duration } -func (l *leaderNode) ReadIndex() (uint64, error) { +func (l *leaderNode) ReadIndex() (read raftnode.ReadIndex, err error) { ctx, cancel := context.WithTimeout(context.Background(), l.timeout) defer cancel() resp, err := l.client.ReadIndex(ctx, new(raftnodepb.ReadIndexRequest)) if err != nil { - return 0, err + return read, err } - return resp.ReadIndex, nil + read.CommitIndex = resp.CommitIndex + read.Term = resp.Term + return read, nil } // localNode represents the state machine of the local node. diff --git a/pkg/experiment/metastore/query_service.go b/pkg/experiment/metastore/query_service.go index c5dc64ea48..bf26940caa 100644 --- a/pkg/experiment/metastore/query_service.go +++ b/pkg/experiment/metastore/query_service.go @@ -16,10 +16,12 @@ import ( metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" "github.com/grafana/pyroscope/pkg/experiment/metastore/index" + "github.com/grafana/pyroscope/pkg/experiment/metastore/raftnode" "github.com/grafana/pyroscope/pkg/model" ) type IndexQuerier interface { + FindBlocks(tx *bbolt.Tx, list *metastorev1.BlockList) []*metastorev1.BlockMeta FindBlocksInRange(tx *bbolt.Tx, start, end int64, tenants map[string]struct{}) []*metastorev1.BlockMeta ForEachPartition(ctx context.Context, f func(*index.PartitionMeta) error) error } @@ -48,7 +50,12 @@ func (svc *MetadataQueryService) QueryMetadata( ctx context.Context, req *metastorev1.QueryMetadataRequest, ) (resp *metastorev1.QueryMetadataResponse, err error) { - read := func(tx *bbolt.Tx) { + read := func(tx *bbolt.Tx, _ raftnode.ReadIndex) { + // NOTE(kolesnikovae): there's a little chance that we read + // applied changes not yet committed by the quorum, because we + // ignore the read index. This is fine in 99.(9)% of cases. + // In the future we should ensure isolation so that we + // do not access the state beyond the read index.
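// The read index is already delivered to this callback by
// State.ConsistentRead; a stricter reader could accept the parameter
// instead of discarding it. A hypothetical sketch (not part of this
// change), using the CommitIndex and Term fields shown above:
//
//	read := func(tx *bbolt.Tx, ri raftnode.ReadIndex) {
//		// ri.CommitIndex and ri.Term identify the raft position this
//		// view is based on; a reader could reject or retry when the
//		// locally applied state has moved past ri.CommitIndex.
//		resp, err = svc.listBlocksForQuery(ctx, tx, req)
//	}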
resp, err = svc.listBlocksForQuery(ctx, tx, req) } if readErr := svc.state.ConsistentRead(ctx, read); readErr != nil { diff --git a/pkg/experiment/metastore/raftnode/node.go b/pkg/experiment/metastore/raftnode/node.go index 1bb4338e35..fb08afb372 100644 --- a/pkg/experiment/metastore/raftnode/node.go +++ b/pkg/experiment/metastore/raftnode/node.go @@ -33,9 +33,9 @@ type Config struct { BindAddress string `yaml:"bind_address"` AdvertiseAddress string `yaml:"advertise_address"` - ApplyTimeout time.Duration `yaml:"apply_timeout" doc:"hidden"` - AppliedIndexCheckInterval time.Duration `yaml:"applied_index_check_interval" doc:"hidden"` - ReadIndexMaxDistance uint64 `yaml:"read_index_max_distance" doc:"hidden"` + ApplyTimeout time.Duration `yaml:"apply_timeout" doc:"hidden"` + LogIndexCheckInterval time.Duration `yaml:"log_index_check_interval" doc:"hidden"` + ReadIndexMaxDistance uint64 `yaml:"read_index_max_distance" doc:"hidden"` WALCacheEntries uint64 `yaml:"wal_cache_entries" doc:"hidden"` TrailingLogs uint64 `yaml:"trailing_logs" doc:"hidden"` @@ -67,7 +67,7 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.StringVar(&cfg.AdvertiseAddress, prefix+"advertise-address", "localhost:9099", "") f.DurationVar(&cfg.ApplyTimeout, prefix+"apply-timeout", 5*time.Second, "") - f.DurationVar(&cfg.AppliedIndexCheckInterval, prefix+"applied-index-check-interval", 14*time.Millisecond, "") + f.DurationVar(&cfg.LogIndexCheckInterval, prefix+"log-index-check-interval", 14*time.Millisecond, "") f.Uint64Var(&cfg.ReadIndexMaxDistance, prefix+"read-index-max-distance", 10<<10, "") f.Uint64Var(&cfg.WALCacheEntries, prefix+"wal-cache-entries", defaultWALCacheEntries, "") @@ -124,10 +124,6 @@ func NewNode( } }() - hasState, err := n.openStore() - if err != nil { - return nil, err - } addr, err := net.ResolveTCPAddr("tcp", config.AdvertiseAddress) if err != nil { return nil, err @@ -141,55 +137,64 @@ func NewNode( return nil, err } + if err = n.openStore(); err != nil { + return nil, err + } + + return &n, nil +} + +func (n *Node) Init() (err error) { raftConfig := raft.DefaultConfig() // TODO: Wrap gokit // config.Logger raftConfig.LogLevel = "debug" - raftConfig.TrailingLogs = config.TrailingLogs - raftConfig.SnapshotThreshold = config.SnapshotThreshold - raftConfig.SnapshotInterval = config.SnapshotInterval + raftConfig.TrailingLogs = n.config.TrailingLogs + raftConfig.SnapshotThreshold = n.config.SnapshotThreshold + raftConfig.SnapshotInterval = n.config.SnapshotInterval raftConfig.LocalID = raft.ServerID(n.config.ServerID) - n.raft, err = raft.NewRaft(raftConfig, fsm, n.logStore, n.stableStore, n.snapshotStore, n.transport) + n.raft, err = raft.NewRaft(raftConfig, n.fsm, n.logStore, n.stableStore, n.snapshotStore, n.transport) if err != nil { - return nil, fmt.Errorf("starting raft node: %w", err) + return fmt.Errorf("starting raft node: %w", err) } + n.observer = NewRaftStateObserver(n.logger, n.raft, n.reg) + n.service = NewRaftNodeService(n) + hasState, err := raft.HasExistingState(n.logStore, n.stableStore, n.snapshotStore) + if err != nil { + return fmt.Errorf("failed to check for existing state: %w", err) + } if !hasState { level.Warn(n.logger).Log("msg", "no existing state found, trying to bootstrap cluster") if err = n.bootstrap(); err != nil { - return nil, fmt.Errorf("failed to bootstrap cluster: %w", err) + return fmt.Errorf("failed to bootstrap cluster: %w", err) } } else { level.Debug(n.logger).Log("msg", "restoring existing state, not bootstrapping") } - n.observer 
= NewRaftStateObserver(n.logger, n.raft, reg) - n.service = NewRaftNodeService(&n) - return &n, nil + return nil } -func (n *Node) openStore() (hasState bool, err error) { +func (n *Node) openStore() (err error) { if err = n.createDirs(); err != nil { - return false, err + return err } n.wal, err = raftwal.Open(n.walDir) if err != nil { - return false, fmt.Errorf("failed to open WAL: %w", err) + return fmt.Errorf("failed to open WAL: %w", err) } n.snapshots, err = raft.NewFileSnapshotStore(n.config.Dir, int(n.config.SnapshotsRetain), os.Stderr) if err != nil { - return false, fmt.Errorf("failed to open shapshot store: %w", err) + return fmt.Errorf("failed to open shapshot store: %w", err) } n.logStore = n.wal n.logStore, _ = raft.NewLogCache(int(n.config.WALCacheEntries), n.logStore) n.stableStore = n.wal n.snapshotStore = n.snapshots - if hasState, err = raft.HasExistingState(n.logStore, n.stableStore, n.snapshotStore); err != nil { - return hasState, fmt.Errorf("failed to check for existing state: %w", err) - } - return hasState, nil + return nil } func (n *Node) createDirs() (err error) { @@ -222,6 +227,10 @@ func (n *Node) Shutdown() { } } +func (n *Node) ListSnapshots() ([]*raft.SnapshotMeta, error) { + return n.snapshots.List() +} + func (n *Node) Register(server *grpc.Server) { raftnodepb.RegisterRaftNodeServiceServer(server, n.service) } @@ -278,42 +287,3 @@ func (n *Node) Propose(t fsm.RaftLogEntryType, m proto.Message) (resp proto.Mess } return resp, r.Err } - -func (n *Node) AppliedIndex() uint64 { return n.raft.AppliedIndex() } - -// ReadIndex implements the Read Index technique. -// Please refer to the source Raft paper, paragraph 6.4. for details. -// https://web.stanford.edu/~ouster/cgi-bin/papers/OngaroPhD.pdf. -func (n *Node) ReadIndex() (uint64, error) { - // > If the leader has not yet marked an entry from its current term - // > committed, it waits until it has done so. The Leader Completeness - // > Property guarantees that a leader has all committed entries, but - // > at the start of its term, it may not know which those are. To find - // > out, it needs to commit an entry from its term. Raft handles this - // > by having each leader commit a blank no-op entry into the log at - // > the start of its term. As soon as this no-op entry is committed, - // > the leader’s commit index will be at least as large as any other - // > servers’ during its term. - // - // NOTE(kolesnikovae): CommitIndex always returns a valid commit index, - // even when no entries have been added in the current term. - // See the "runLeader" implementation (hashicorp raft) for details. - commitIndex := n.raft.CommitIndex() - // > The leader needs to make sure it has not been superseded by a newer - // > leader of which it is unaware. It issues a new round of heartbeats - // > and waits for their acknowledgments from a majority of the cluster. - // > Once these acknowledgments are received, the leader knows that there - // > could not have existed a leader for a greater term at the moment it - // > sent the heartbeats. Thus, the readIndex was, at the time, the - // > largest commit index ever seen by any server in the cluster. - err := n.raft.VerifyLeader().Error() - if err != nil { - // The error includes details about the actual leader the request - // should be directed to; the client should retry the operation. 
- return 0, WithRaftLeaderStatusDetails(err, n.raft) - } - // The commit index is up-to-date and the node is the leader: this is the - // lower bound of the state any query must operate against. This does not - // specify, however, that the upper bound (i.e. no snapshot isolation). - return commitIndex, nil -} diff --git a/pkg/experiment/metastore/raftnode/node_bootstrap.go b/pkg/experiment/metastore/raftnode/node_bootstrap.go index b88972af6e..fb25b7d806 100644 --- a/pkg/experiment/metastore/raftnode/node_bootstrap.go +++ b/pkg/experiment/metastore/raftnode/node_bootstrap.go @@ -41,7 +41,34 @@ func (n *Node) bootstrap() error { return nil } -func (n *Node) bootstrapPeers() ([]raft.Server, error) { +func (n *Node) bootstrapPeersWithRetries() (peers []raft.Server, err error) { + prov := dns.NewProvider(n.logger, n.reg, dns.MiekgdnsResolverType) + attempt := func() bool { + peers, err = n.bootstrapPeers(prov) + level.Debug(n.logger).Log("msg", "resolving bootstrap peers", "peers", fmt.Sprint(peers), "err", err) + if err != nil { + _ = level.Error(n.logger).Log("msg", "failed to resolve bootstrap peers", "err", err) + return false + } + return true + } + backoffConfig := backoff.Config{ + MinBackoff: 1 * time.Second, + MaxBackoff: 10 * time.Second, + MaxRetries: 20, + } + backOff := backoff.New(context.Background(), backoffConfig) + for backOff.Ongoing() { + if !attempt() { + backOff.Wait() + } else { + return peers, nil + } + } + return nil, fmt.Errorf("failed to resolve bootstrap peers after %d retries %w", backOff.NumRetries(), err) +} + +func (n *Node) bootstrapPeers(prov *dns.Provider) ([]raft.Server, error) { // The peer list always includes the local node. peers := make([]raft.Server, 0, len(n.config.BootstrapPeers)+1) peers = append(peers, raft.Server{ @@ -68,7 +95,6 @@ func (n *Node) bootstrapPeers() ([]raft.Server, error) { if len(resolve) > 0 { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - prov := dns.NewProvider(n.logger, n.reg, dns.MiekgdnsResolverType) if err := prov.Resolve(ctx, resolve); err != nil { return nil, fmt.Errorf("failed to resolve bootstrap peers: %w", err) } @@ -102,29 +128,3 @@ func (n *Node) bootstrapPeers() ([]raft.Server, error) { } return peers, nil } - -func (n *Node) bootstrapPeersWithRetries() (peers []raft.Server, err error) { - attempt := func() bool { - peers, err = n.bootstrapPeers() - level.Debug(n.logger).Log("msg", "resolving bootstrap peers", "peers", fmt.Sprint(peers), "err", err) - if err != nil { - _ = level.Error(n.logger).Log("msg", "failed to resolve bootstrap peers", "err", err) - return false - } - return true - } - backoffConfig := backoff.Config{ - MinBackoff: 1 * time.Second, - MaxBackoff: 10 * time.Second, - MaxRetries: 20, - } - backOff := backoff.New(context.Background(), backoffConfig) - for backOff.Ongoing() { - if !attempt() { - backOff.Wait() - } else { - return peers, nil - } - } - return nil, fmt.Errorf("failed to resolve bootstrap peers after %d retries %w", backOff.NumRetries(), err) -} diff --git a/pkg/experiment/metastore/raftnode/node_read.go b/pkg/experiment/metastore/raftnode/node_read.go index 0f7093b9df..7e23e022a2 100644 --- a/pkg/experiment/metastore/raftnode/node_read.go +++ b/pkg/experiment/metastore/raftnode/node_read.go @@ -5,6 +5,8 @@ import ( "errors" "fmt" "time" + + "github.com/hashicorp/raft" ) var ( @@ -13,8 +15,22 @@ var ( ErrAborted = errors.New("aborted") ) +// ReadIndex is the lower bound for the state any query must operate against. 
+// However, it does not guarantee snapshot isolation or an upper bound (which +// is the applied index of the state machine being queried). +// +// Refer to https://web.stanford.edu/~ouster/cgi-bin/papers/OngaroPhD.pdf, +// paragraph 6.4, "Processing read-only queries more efficiently". +type ReadIndex struct { + // CommitIndex is the index of the last log entry that was committed by + // the leader and is guaranteed to be present on all followers. + CommitIndex uint64 + // Term the leader was in when the entry was committed. + Term uint64 +} + type Leader interface { - ReadIndex() (uint64, error) + ReadIndex() (ReadIndex, error) } type FSM[Tx any] interface { @@ -37,9 +53,6 @@ type StateReader[Tx any] struct { // implements the Leader Read pattern. Otherwise, it implements the Follower // Read pattern. // -// Refer to https://web.stanford.edu/~ouster/cgi-bin/papers/OngaroPhD.pdf, -// paragraph 6.4, "Processing read-only queries more efficiently": -// // > This approach is more efficient than committing read-only queries as new // > entries in the log, since it avoids synchronous disk writes. To improve // > efficiency further, the leader can amortize the cost of confirming its @@ -95,15 +108,15 @@ func NewStateReader[Tx any]( // // It's caller's responsibility to handle errors encountered while using the // provided transaction, such as I/O errors or logical inconsistencies. -func (r *StateReader[Tx]) ConsistentRead(ctx context.Context, read func(Tx)) error { +func (r *StateReader[Tx]) ConsistentRead(ctx context.Context, read func(tx Tx, index ReadIndex)) error { if err := r.consistentRead(ctx, read); err != nil { return fmt.Errorf("%w: %w", ErrConsistentRead, err) } return nil } -func (r *StateReader[Tx]) consistentRead(ctx context.Context, read func(Tx)) error { - applied, err := r.WaitLeaderCommitIndexApplied(ctx) +func (r *StateReader[Tx]) consistentRead(ctx context.Context, read func(tx Tx, index ReadIndex)) error { + readIndex, err := r.WaitLeaderCommitIndexApplied(ctx) if err != nil { return err } @@ -115,14 +128,14 @@ func (r *StateReader[Tx]) consistentRead(ctx context.Context, read func(Tx)) err // after the index check and before the transaction begins (blocking // state restore). We perform the check again to detect this, and // abort the operation if this is the case. - if r.fsm.AppliedIndex() < applied { + if r.fsm.AppliedIndex() < readIndex.CommitIndex { readErr = ErrAborted return } - // It's guaranteed that the FSM has the most up-to-date state - // relative to the read time: any subsequent read will include - // the state we're accessing now. - read(tx) + // NOTE(kolesnikovae): The leader guarantees that the state observed is + // not older than its committed index but does not guarantee it is the + // latest possible state at the time of the read. 
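+		// The read index is passed to the callback so that callers can act on
+		// the bound themselves: the check above guarantees that the transaction
+		// reflects at least readIndex.CommitIndex.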
+ read(tx, readIndex) } if err = r.fsm.Read(fn); err != nil { // The FSM might not be able to perform the read operation due to the @@ -133,36 +146,121 @@ func (r *StateReader[Tx]) consistentRead(ctx context.Context, read func(Tx)) err return readErr } -func (r *StateReader[tx]) WaitLeaderCommitIndexApplied(ctx context.Context) (uint64, error) { +// WaitLeaderCommitIndexApplied blocks until the local +// applied index reaches the leader read index +func (r *StateReader[tx]) WaitLeaderCommitIndexApplied(ctx context.Context) (ReadIndex, error) { readIndex, err := r.leader.ReadIndex() if err != nil { - return 0, err + return ReadIndex{}, err } - applied, reached, err := r.checkAppliedIndex(readIndex) - if err != nil || reached { - return applied, err + return readIndex, waitIndexReached(ctx, + r.fsm.AppliedIndex, + readIndex.CommitIndex, + r.checkInterval, + int(r.maxDistance), + ) +} + +func (n *Node) ReadIndex() (ReadIndex, error) { + v, err := n.readIndex() + return v, WithRaftLeaderStatusDetails(err, n.raft) +} + +func (n *Node) AppliedIndex() uint64 { return n.raft.AppliedIndex() } + +func (n *Node) readIndex() (ReadIndex, error) { + // > If the leader has not yet marked an entry from its current term + // > committed, it waits until it has done so. The Leader Completeness + // > Property guarantees that a leader has all committed entries, but + // > at the start of its term, it may not know which those are. To find + // > out, it needs to commit an entry from its term. Raft handles this + // > by having each leader commit a blank no-op entry into the log at + // > the start of its term. As soon as this no-op entry is committed, + // > the leader’s commit index will be at least as large as any other + // > servers’ during its term. + term := n.raft.CurrentTerm() + // See the "runLeader" and "dispatchLogs" implementation (hashicorp raft) + // for details: when the leader is elected, it issues a noop, we only need + // to ensure that the entry is committed before we access the current + // commit index. This may incur substantial latency, if replicas are slow, + // but it's the only way to ensure that the leader has all committed + // entries. We also keep track of the current term to ensure that the + // leader has not changed while we were waiting for the noop to be + // committed and heartbeat messages to be exchanged. + if err := n.waitLastIndexCommitted(); err != nil { + return ReadIndex{}, err + } + commitIndex := n.raft.CommitIndex() + // > The leader needs to make sure it has not been superseded by a newer + // > leader of which it is unaware. It issues a new round of heartbeats + // > and waits for their acknowledgments from a majority of the cluster. + // > Once these acknowledgments are received, the leader knows that there + // > could not have existed a leader for a greater term at the moment it + // > sent the heartbeats. Thus, the readIndex was, at the time, the + // > largest commit index ever seen by any server in the cluster. + if err := n.raft.VerifyLeader().Error(); err != nil { + // The error includes details about the actual leader the request + // should be directed to; the client should retry the operation. + return ReadIndex{}, err } + // The CommitIndex and leader heartbeats must be in the same term. + // Otherwise, we can't guarantee that this is the leader's commit index + // (mind the ABA problem), and thus, we can't guarantee completeness. 
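+	// For example: the node may lose leadership right after CommitIndex is
+	// read, another leader may commit entries in a newer term, and the node
+	// may then be re-elected. VerifyLeader succeeds in that case, yet the
+	// commit index captured above is no longer the largest in the cluster.
+	// Comparing the terms observed before and after detects this.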
+ if n.raft.CurrentTerm() != term { + // There's a chance that the leader has changed since we've checked + // the leader status. The client should retry the operation, to + // ensure correctness of the read index. + return ReadIndex{}, raft.ErrLeadershipLost + } + // The node was the leader before we saved readIndex, and no elections + // have occurred while we were confirming leadership. + return ReadIndex{CommitIndex: commitIndex, Term: term}, nil +} - t := time.NewTicker(r.checkInterval) +func (n *Node) waitLastIndexCommitted() error { + ctx, cancel := context.WithTimeout(context.Background(), n.config.ApplyTimeout) + defer cancel() + return waitIndexReached(ctx, + n.raft.CommitIndex, + n.raft.LastIndex(), + n.config.LogIndexCheckInterval, + int(n.config.ReadIndexMaxDistance), + ) +} + +// waitIndexReached blocks until a >= b. +// If b - a >= maxDistance, the function return ErrLagBehind. +// reached is guaranteed to be false, if err != nil. +func waitIndexReached( + ctx context.Context, + src func() uint64, + dst uint64, + interval time.Duration, + maxDistance int, +) error { + if reached, err := compareIndex(src, dst, maxDistance); err != nil || reached { + return err + } + t := time.NewTicker(interval) defer t.Stop() for { select { case <-ctx.Done(): - return 0, ctx.Err() + return ctx.Err() case <-t.C: - if applied, reached, err = r.checkAppliedIndex(readIndex); err != nil || reached { - return applied, err + if reached, err := compareIndex(src, dst, maxDistance); err != nil || reached { + return err } } } } -func (r *StateReader[tx]) checkAppliedIndex(readIndex uint64) (uint64, bool, error) { - applied := r.fsm.AppliedIndex() - if r.maxDistance > 0 { - if delta := int(readIndex) - int(applied); delta > int(r.maxDistance) { - return 0, false, ErrLagBehind +func compareIndex(src func() uint64, dst uint64, maxDistance int) (bool, error) { + cur := src() + if maxDistance > 0 { + if delta := int(dst) - int(cur); delta > maxDistance { + return false, ErrLagBehind } } - return applied, applied >= readIndex, nil + return cur >= dst, nil } diff --git a/pkg/experiment/metastore/raftnode/raftnodepb/raft_node.pb.go b/pkg/experiment/metastore/raftnode/raftnodepb/raft_node.pb.go index f83f71b911..341e552d84 100644 --- a/pkg/experiment/metastore/raftnode/raftnodepb/raft_node.pb.go +++ b/pkg/experiment/metastore/raftnode/raftnodepb/raft_node.pb.go @@ -118,7 +118,8 @@ type ReadIndexResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ReadIndex uint64 `protobuf:"varint,1,opt,name=read_index,json=readIndex,proto3" json:"read_index,omitempty"` + CommitIndex uint64 `protobuf:"varint,1,opt,name=commit_index,json=commitIndex,proto3" json:"commit_index,omitempty"` + Term uint64 `protobuf:"varint,2,opt,name=term,proto3" json:"term,omitempty"` } func (x *ReadIndexResponse) Reset() { @@ -153,9 +154,16 @@ func (*ReadIndexResponse) Descriptor() ([]byte, []int) { return file_experiment_metastore_raftnode_raftnodepb_raft_node_proto_rawDescGZIP(), []int{2} } -func (x *ReadIndexResponse) GetReadIndex() uint64 { +func (x *ReadIndexResponse) GetCommitIndex() uint64 { if x != nil { - return x.ReadIndex + return x.CommitIndex + } + return 0 +} + +func (x *ReadIndexResponse) GetTerm() uint64 { + if x != nil { + return x.Term } return 0 } @@ -486,67 +494,69 @@ var file_experiment_metastore_raftnode_raftnodepb_raft_node_proto_rawDesc = []by 0x64, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 
0x65, 0x73, 0x73, 0x22, 0x12, 0x0a, 0x10, 0x52, 0x65, 0x61, 0x64, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, - 0x32, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x69, 0x6e, 0x64, - 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x49, 0x6e, - 0x64, 0x65, 0x78, 0x22, 0x11, 0x0a, 0x0f, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x3b, 0x0a, 0x10, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x04, 0x6e, 0x6f, - 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x6e, - 0x6f, 0x64, 0x65, 0x22, 0xec, 0x03, 0x0a, 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, - 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x2d, 0x0a, - 0x12, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x61, 0x64, 0x76, 0x65, 0x72, - 0x74, 0x69, 0x73, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x49, 0x64, 0x12, - 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x49, 0x6e, 0x64, - 0x65, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x64, 0x5f, 0x69, 0x6e, - 0x64, 0x65, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x6c, 0x69, - 0x65, 0x64, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x73, 0x74, 0x5f, - 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6c, 0x61, 0x73, - 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x2f, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6e, 0x6f, 0x64, - 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, - 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x12, 0x2e, 0x0a, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, - 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6e, 0x6f, - 0x64, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x65, 0x65, 0x72, - 0x52, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, 0x1a, 0x31, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x66, 0x0a, 0x04, 0x50, 0x65, - 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x65, 
0x72, 0x49, 0x64, 0x12, - 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x75, 0x66, 0x66, 0x72, 0x61, - 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x75, 0x66, 0x66, 0x72, 0x61, - 0x67, 0x65, 0x32, 0xa2, 0x01, 0x0a, 0x0f, 0x52, 0x61, 0x66, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x48, 0x0a, 0x09, 0x52, 0x65, 0x61, 0x64, 0x49, 0x6e, - 0x64, 0x65, 0x78, 0x12, 0x1b, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, - 0x52, 0x65, 0x61, 0x64, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1c, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x65, 0x61, - 0x64, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x45, 0x0a, 0x08, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1a, 0x2e, 0x72, - 0x61, 0x66, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0xa9, 0x01, 0x0a, 0x0d, 0x63, 0x6f, 0x6d, 0x2e, - 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x42, 0x0d, 0x52, 0x61, 0x66, 0x74, 0x4e, - 0x6f, 0x64, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x49, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x61, 0x66, 0x61, 0x6e, 0x61, 0x2f, 0x70, - 0x79, 0x72, 0x6f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x65, 0x78, 0x70, - 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, - 0x65, 0x2f, 0x72, 0x61, 0x66, 0x74, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x72, 0x61, 0x66, 0x74, 0x6e, - 0x6f, 0x64, 0x65, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x52, 0x58, 0x58, 0xaa, 0x02, 0x08, 0x52, 0x61, - 0x66, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0xca, 0x02, 0x08, 0x52, 0x61, 0x66, 0x74, 0x4e, 0x6f, 0x64, - 0x65, 0xe2, 0x02, 0x14, 0x52, 0x61, 0x66, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x5c, 0x47, 0x50, 0x42, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x08, 0x52, 0x61, 0x66, 0x74, 0x4e, - 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x4a, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x22, 0x11, 0x0a, 0x0f, 0x4e, + 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x3b, + 0x0a, 0x10, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x27, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x13, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4e, 0x6f, 0x64, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x22, 0xec, 0x03, 0x0a, 0x08, + 0x4e, 0x6f, 0x64, 0x65, 0x49, 
0x6e, 0x66, 0x6f, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x2d, 0x0a, 0x12, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, + 0x73, 0x65, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x11, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x6d, 0x69, + 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, + 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x70, + 0x70, 0x6c, 0x69, 0x65, 0x64, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x64, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, + 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x09, 0x6c, 0x61, 0x73, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x2f, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x12, + 0x2e, 0x0a, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, + 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x05, 0x70, 0x65, 0x65, 0x72, 0x73, 0x1a, + 0x31, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x1a, 0x66, 0x0a, 0x04, 0x50, 0x65, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1a, + 0x0a, 0x08, 0x73, 0x75, 0x66, 0x66, 0x72, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x73, 0x75, 0x66, 0x66, 0x72, 0x61, 0x67, 0x65, 0x32, 0xa2, 0x01, 0x0a, 0x0f, 0x52, + 0x61, 0x66, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x48, + 0x0a, 0x09, 0x52, 0x65, 0x61, 0x64, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x2e, 0x72, 0x61, + 0x66, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 
0x45, 0x0a, 0x08, 0x4e, 0x6f, 0x64, 0x65, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1a, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1b, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x4e, 0x6f, 0x64, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, + 0xa9, 0x01, 0x0a, 0x0d, 0x63, 0x6f, 0x6d, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6e, 0x6f, 0x64, + 0x65, 0x42, 0x0d, 0x52, 0x61, 0x66, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x49, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, + 0x72, 0x61, 0x66, 0x61, 0x6e, 0x61, 0x2f, 0x70, 0x79, 0x72, 0x6f, 0x73, 0x63, 0x6f, 0x70, 0x65, + 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x2f, + 0x6d, 0x65, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x61, 0x66, 0x74, 0x6e, 0x6f, + 0x64, 0x65, 0x2f, 0x72, 0x61, 0x66, 0x74, 0x6e, 0x6f, 0x64, 0x65, 0x70, 0x62, 0xa2, 0x02, 0x03, + 0x52, 0x58, 0x58, 0xaa, 0x02, 0x08, 0x52, 0x61, 0x66, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0xca, 0x02, + 0x08, 0x52, 0x61, 0x66, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0xe2, 0x02, 0x14, 0x52, 0x61, 0x66, 0x74, + 0x4e, 0x6f, 0x64, 0x65, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0xea, 0x02, 0x08, 0x52, 0x61, 0x66, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( diff --git a/pkg/experiment/metastore/raftnode/raftnodepb/raft_node.proto b/pkg/experiment/metastore/raftnode/raftnodepb/raft_node.proto index b2e2bf1ba0..e3454087a2 100644 --- a/pkg/experiment/metastore/raftnode/raftnodepb/raft_node.proto +++ b/pkg/experiment/metastore/raftnode/raftnodepb/raft_node.proto @@ -15,7 +15,8 @@ service RaftNodeService { message ReadIndexRequest {} message ReadIndexResponse { - uint64 read_index = 1; + uint64 commit_index = 1; + uint64 term = 2; } message NodeInfoRequest {} diff --git a/pkg/experiment/metastore/raftnode/raftnodepb/raft_node_vtproto.pb.go b/pkg/experiment/metastore/raftnode/raftnodepb/raft_node_vtproto.pb.go index b7c7cd0cfa..57e1b0929b 100644 --- a/pkg/experiment/metastore/raftnode/raftnodepb/raft_node_vtproto.pb.go +++ b/pkg/experiment/metastore/raftnode/raftnodepb/raft_node_vtproto.pb.go @@ -259,8 +259,13 @@ func (m *ReadIndexResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.ReadIndex != 0 { - i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ReadIndex)) + if m.Term != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Term)) + i-- + dAtA[i] = 0x10 + } + if m.CommitIndex != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CommitIndex)) i-- dAtA[i] = 0x8 } @@ -580,8 +585,11 @@ func (m *ReadIndexResponse) SizeVT() (n int) { } var l int _ = l - if m.ReadIndex != 0 { - n += 1 + protohelpers.SizeOfVarint(uint64(m.ReadIndex)) + if m.CommitIndex != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.CommitIndex)) + } + if m.Term != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Term)) } n += len(m.unknownFields) return n @@ -897,9 +905,28 @@ func (m *ReadIndexResponse) UnmarshalVT(dAtA []byte) error { switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadIndex", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CommitIndex", wireType) + } + m.CommitIndex = 0 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CommitIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) } - m.ReadIndex = 0 + m.Term = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return protohelpers.ErrIntOverflow @@ -909,7 +936,7 @@ func (m *ReadIndexResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadIndex |= uint64(b&0x7F) << shift + m.Term |= uint64(b&0x7F) << shift if b < 0x80 { break } diff --git a/pkg/experiment/metastore/raftnode/service.go b/pkg/experiment/metastore/raftnode/service.go index 0e53f976a6..8ff5adb408 100644 --- a/pkg/experiment/metastore/raftnode/service.go +++ b/pkg/experiment/metastore/raftnode/service.go @@ -7,7 +7,7 @@ import ( ) type RaftNode interface { - ReadIndex() (uint64, error) + ReadIndex() (ReadIndex, error) NodeInfo() (*raftnodepb.NodeInfo, error) } @@ -25,11 +25,15 @@ func (svc *RaftNodeService) ReadIndex( context.Context, *raftnodepb.ReadIndexRequest, ) (*raftnodepb.ReadIndexResponse, error) { - readIndex, err := svc.node.ReadIndex() + read, err := svc.node.ReadIndex() if err != nil { return nil, err } - return &raftnodepb.ReadIndexResponse{ReadIndex: readIndex}, nil + resp := &raftnodepb.ReadIndexResponse{ + CommitIndex: read.CommitIndex, + Term: read.Term, + } + return resp, nil } func (svc *RaftNodeService) NodeInfo( diff --git a/pkg/experiment/metastore/store/store.go b/pkg/experiment/metastore/store/store.go new file mode 100644 index 0000000000..aff8e72b56 --- /dev/null +++ b/pkg/experiment/metastore/store/store.go @@ -0,0 +1,44 @@ +package store + +import ( + "bytes" + "errors" + + "go.etcd.io/bbolt" +) + +var ErrorNotFound = errors.New("not found") + +type KV struct { + Key []byte + Value []byte +} + +func NewCursorIter(prefix []byte, cursor *bbolt.Cursor) *CursorIterator { + return &CursorIterator{prefix: prefix, cursor: cursor} +} + +type CursorIterator struct { + cursor *bbolt.Cursor + seek bool + prefix []byte + k, v []byte +} + +func (c *CursorIterator) Next() bool { + if !c.seek { + c.k, c.v = c.cursor.Seek(c.prefix) + c.seek = true + } else { + c.k, c.v = c.cursor.Next() + } + return c.valid() +} + +func (c *CursorIterator) valid() bool { + return c.k != nil && (len(c.prefix) == 0 || bytes.HasPrefix(c.k, c.prefix)) +} + +func (c *CursorIterator) At() KV { return KV{Key: c.k, Value: c.v} } +func (c *CursorIterator) Err() error { return nil } +func (c *CursorIterator) Close() error { return nil } diff --git a/pkg/experiment/metastore/storeutils/storeutils.go b/pkg/experiment/metastore/storeutils/storeutils.go deleted file mode 100644 index 9ffdf437ba..0000000000 --- a/pkg/experiment/metastore/storeutils/storeutils.go +++ /dev/null @@ -1,22 +0,0 @@ -package storeutils - -import ( - "encoding/binary" - - "go.etcd.io/bbolt" -) - -func ParseTenantShardBucketName(b []byte) (shard uint32, tenant string, ok bool) { - if len(b) >= 4 { - return binary.BigEndian.Uint32(b), string(b[4:]), true - } - return 0, "", false -} - -func GetOrCreateSubBucket(parent *bbolt.Bucket, name []byte) (*bbolt.Bucket, error) { - bucket := parent.Bucket(name) - if bucket == nil { - return parent.CreateBucket(name) - } - return bucket, nil -} diff --git a/pkg/experiment/metastore/tenant_service.go b/pkg/experiment/metastore/tenant_service.go index 634536a86b..a57ab19e29 100644 --- 
a/pkg/experiment/metastore/tenant_service.go +++ b/pkg/experiment/metastore/tenant_service.go @@ -12,6 +12,7 @@ import ( metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" "github.com/grafana/pyroscope/pkg/experiment/metastore/index" + "github.com/grafana/pyroscope/pkg/experiment/metastore/raftnode" ) // TODO(kolesnikovae): The service should not know @@ -41,9 +42,7 @@ func (svc *TenantService) GetTenant( ctx context.Context, req *metastorev1.GetTenantRequest, ) (resp *metastorev1.GetTenantResponse, err error) { - read := func(_ *bbolt.Tx) { - // Although we're not using transaction here, we need to ensure - // strong consistency of the read operation. + read := func(*bbolt.Tx, raftnode.ReadIndex) { resp, err = svc.getTenantStats(req.TenantId, ctx) } if readErr := svc.state.ConsistentRead(ctx, read); readErr != nil { diff --git a/pkg/experiment/metastore/tombstones/store/tombstone_store.go b/pkg/experiment/metastore/tombstones/store/tombstone_store.go new file mode 100644 index 0000000000..971095f7a6 --- /dev/null +++ b/pkg/experiment/metastore/tombstones/store/tombstone_store.go @@ -0,0 +1,103 @@ +package store + +import ( + "encoding/binary" + "errors" + "fmt" + + "go.etcd.io/bbolt" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/pkg/experiment/metastore/store" + "github.com/grafana/pyroscope/pkg/iter" +) + +var ErrInvalidTombstoneEntry = errors.New("invalid tombstone entry") + +var tombstoneBucketName = []byte("tombstones") + +type TombstoneEntry struct { + Index uint64 + AppendedAt int64 + *metastorev1.Tombstones +} + +type TombstoneStore struct{ bucketName []byte } + +func NewTombstoneStore() *TombstoneStore { + return &TombstoneStore{bucketName: tombstoneBucketName} +} + +func (s *TombstoneStore) CreateBuckets(tx *bbolt.Tx) error { + _, err := tx.CreateBucketIfNotExists(s.bucketName) + return err +} + +func (s *TombstoneStore) StoreTombstones(tx *bbolt.Tx, entry TombstoneEntry) error { + kv := marshalTombstoneEntry(entry) + return tx.Bucket(s.bucketName).Put(kv.Key, kv.Value) +} + +func (s *TombstoneStore) DeleteTombstones(tx *bbolt.Tx, entry TombstoneEntry) error { + return tx.Bucket(s.bucketName).Delete(marshalTombstoneEntryKey(entry)) +} + +func (s *TombstoneStore) ListEntries(tx *bbolt.Tx) iter.Iterator[TombstoneEntry] { + return newTombstoneEntriesIterator(tx.Bucket(s.bucketName)) +} + +type tombstoneEntriesIterator struct { + iter *store.CursorIterator + cur TombstoneEntry + err error +} + +func newTombstoneEntriesIterator(bucket *bbolt.Bucket) *tombstoneEntriesIterator { + return &tombstoneEntriesIterator{iter: store.NewCursorIter(nil, bucket.Cursor())} +} + +func (x *tombstoneEntriesIterator) Next() bool { + if x.err != nil || !x.iter.Next() { + return false + } + x.err = unmarshalTombstoneEntry(&x.cur, x.iter.At()) + return x.err == nil +} + +func (x *tombstoneEntriesIterator) At() TombstoneEntry { return x.cur } + +func (x *tombstoneEntriesIterator) Close() error { return x.iter.Close() } + +func (x *tombstoneEntriesIterator) Err() error { + if err := x.iter.Err(); err != nil { + return err + } + return x.err +} + +func marshalTombstoneEntry(e TombstoneEntry) store.KV { + k := marshalTombstoneEntryKey(e) + b := make([]byte, e.Tombstones.SizeVT()) + _, _ = e.Tombstones.MarshalToSizedBufferVT(b) + return store.KV{Key: k, Value: b} +} + +func marshalTombstoneEntryKey(e TombstoneEntry) []byte { + b := make([]byte, 16) + binary.BigEndian.PutUint64(b[0:8], e.Index) + 
binary.BigEndian.PutUint64(b[8:16], uint64(e.AppendedAt)) + return b +} + +func unmarshalTombstoneEntry(dst *TombstoneEntry, e store.KV) error { + if len(e.Key) < 16 { + return ErrInvalidTombstoneEntry + } + dst.Index = binary.BigEndian.Uint64(e.Key[0:8]) + dst.AppendedAt = int64(binary.BigEndian.Uint64(e.Key[8:16])) + dst.Tombstones = new(metastorev1.Tombstones) + if err := dst.Tombstones.UnmarshalVT(e.Value); err != nil { + return fmt.Errorf("%w: %w", ErrInvalidTombstoneEntry, err) + } + return nil +} diff --git a/pkg/experiment/metastore/tombstones/store/tombstone_store_test.go b/pkg/experiment/metastore/tombstones/store/tombstone_store_test.go new file mode 100644 index 0000000000..fe2efae40b --- /dev/null +++ b/pkg/experiment/metastore/tombstones/store/tombstone_store_test.go @@ -0,0 +1,101 @@ +package store + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/pkg/test" +) + +func TestBlockQueueStore_StoreEntry(t *testing.T) { + db := test.BoltDB(t) + + s := NewTombstoneStore() + tx, err := db.Begin(true) + require.NoError(t, err) + require.NoError(t, s.CreateBuckets(tx)) + + entries := make([]TombstoneEntry, 1000) + for i := range entries { + entries[i] = TombstoneEntry{ + Index: uint64(i), + AppendedAt: time.Now().UnixNano(), + Tombstones: &metastorev1.Tombstones{ + Blocks: &metastorev1.BlockTombstones{Name: "a"}, + }, + } + } + for i := range entries { + assert.NoError(t, s.StoreTombstones(tx, entries[i])) + } + require.NoError(t, tx.Commit()) + + s = NewTombstoneStore() + tx, err = db.Begin(false) + require.NoError(t, err) + iter := s.ListEntries(tx) + var i int + for iter.Next() { + assert.Less(t, i, len(entries)) + assert.Equal(t, entries[i], iter.At()) + i++ + } + assert.Nil(t, iter.Err()) + assert.Nil(t, iter.Close()) + require.NoError(t, tx.Rollback()) +} + +func TestTombstoneStore_DeleteQueuedEntries(t *testing.T) { + db := test.BoltDB(t) + + s := NewTombstoneStore() + tx, err := db.Begin(true) + require.NoError(t, err) + require.NoError(t, s.CreateBuckets(tx)) + + entries := make([]TombstoneEntry, 1000) + for i := range entries { + entries[i] = TombstoneEntry{ + Index: uint64(i), + AppendedAt: time.Now().UnixNano(), + Tombstones: &metastorev1.Tombstones{ + Blocks: &metastorev1.BlockTombstones{Name: "a"}, + }, + } + } + for i := range entries { + assert.NoError(t, s.StoreTombstones(tx, entries[i])) + } + require.NoError(t, tx.Commit()) + + // Delete random 25%. + tx, err = db.Begin(true) + require.NoError(t, err) + for i := 0; i < len(entries); i += 4 { + assert.NoError(t, s.DeleteTombstones(tx, entries[i])) + } + require.NoError(t, tx.Commit()) + + // Check remaining entries. + s = NewTombstoneStore() + tx, err = db.Begin(false) + require.NoError(t, err) + iter := s.ListEntries(tx) + var i int + for iter.Next() { + if i%4 == 0 { + // Skip deleted entries. 
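+			// (entries at indices 0, 4, 8, ... were deleted above)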
+ i++ + } + assert.Less(t, i, len(entries)) + assert.Equal(t, entries[i], iter.At()) + i++ + } + assert.Nil(t, iter.Err()) + assert.Nil(t, iter.Close()) + require.NoError(t, tx.Rollback()) +} diff --git a/pkg/experiment/metastore/tombstones/tombstone_queue.go b/pkg/experiment/metastore/tombstones/tombstone_queue.go new file mode 100644 index 0000000000..8c79f9e652 --- /dev/null +++ b/pkg/experiment/metastore/tombstones/tombstone_queue.go @@ -0,0 +1,75 @@ +package tombstones + +import ( + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/pkg/experiment/metastore/tombstones/store" +) + +type tombstoneQueue struct{ head, tail *tombstones } + +// The only requirement to tombstoneKey is that it must be +// unique and must be received from the raft log. +type tombstoneKey string + +func (k *tombstoneKey) set(t *metastorev1.Tombstones) bool { + if t.Blocks != nil { + *k = tombstoneKey(t.Blocks.Name) + } + return len(*k) > 0 +} + +type tombstones struct { + store.TombstoneEntry + next, prev *tombstones +} + +func newTombstoneQueue() *tombstoneQueue { return &tombstoneQueue{} } + +func (q *tombstoneQueue) push(e *tombstones) bool { + if q.tail != nil { + q.tail.next = e + e.prev = q.tail + } else { + q.head = e + } + q.tail = e + return true +} + +func (q *tombstoneQueue) delete(e *tombstones) *tombstones { + if e.prev != nil { + e.prev.next = e.next + } else { + // This is the head. + q.head = e.next + } + if e.next != nil { + e.next.prev = e.prev + } else { + // This is the tail. + q.tail = e.next + } + e.next = nil + e.prev = nil + return e +} + +type tombstoneIter struct { + head *tombstones + before int64 +} + +func (t *tombstoneIter) Next() bool { + if t.head == nil { + return false + } + if t.head = t.head.next; t.head == nil { + return false + } + return t.head.AppendedAt < t.before +} + +func (t *tombstoneIter) At() *metastorev1.Tombstones { return t.head.Tombstones } + +func (t *tombstoneIter) Err() error { return nil } +func (t *tombstoneIter) Close() error { return nil } diff --git a/pkg/experiment/metastore/tombstones/tombstones.go b/pkg/experiment/metastore/tombstones/tombstones.go new file mode 100644 index 0000000000..dc03efe1c8 --- /dev/null +++ b/pkg/experiment/metastore/tombstones/tombstones.go @@ -0,0 +1,179 @@ +package tombstones + +import ( + "time" + + "github.com/hashicorp/raft" + "go.etcd.io/bbolt" + + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + "github.com/grafana/pyroscope/pkg/experiment/metastore/tombstones/store" + "github.com/grafana/pyroscope/pkg/iter" +) + +type TombstoneStore interface { + StoreTombstones(*bbolt.Tx, store.TombstoneEntry) error + DeleteTombstones(*bbolt.Tx, store.TombstoneEntry) error + ListEntries(*bbolt.Tx) iter.Iterator[store.TombstoneEntry] + CreateBuckets(*bbolt.Tx) error +} + +type Tombstones struct { + tombstones map[tombstoneKey]*tombstones + blocks map[tenantBlockKey]*tenantBlocks + queue *tombstoneQueue + store TombstoneStore +} + +type tenantBlockKey struct { + tenant string + shard uint32 +} + +type tenantBlocks struct { + blocks map[string]struct{} +} + +func NewTombstones(store TombstoneStore) *Tombstones { + return &Tombstones{ + tombstones: make(map[tombstoneKey]*tombstones), + blocks: make(map[tenantBlockKey]*tenantBlocks), + queue: newTombstoneQueue(), + store: store, + } +} + +func NewStore() *store.TombstoneStore { + return store.NewTombstoneStore() +} + +func (x *Tombstones) Exists(md *metastorev1.BlockMeta) bool { + tenant, exists := 
x.blocks[tenantBlockKey{tenant: md.TenantId, shard: md.Shard}] + if exists { + _, exists = tenant.blocks[md.Id] + } + return exists +} + +func (x *Tombstones) ListTombstones(before time.Time) iter.Iterator[*metastorev1.Tombstones] { + return &tombstoneIter{ + head: x.queue.head, + before: before.UnixNano(), + } +} + +func (x *Tombstones) AddTombstones(tx *bbolt.Tx, cmd *raft.Log, t *metastorev1.Tombstones) error { + var k tombstoneKey + if !k.set(t) { + return nil + } + v := store.TombstoneEntry{ + Index: cmd.Index, + AppendedAt: cmd.AppendedAt.UnixNano(), + Tombstones: t, + } + if !x.put(k, v) { + return nil + } + return x.store.StoreTombstones(tx, v) +} + +func (x *Tombstones) DeleteTombstones(tx *bbolt.Tx, cmd *raft.Log, tombstones ...*metastorev1.Tombstones) error { + for _, t := range tombstones { + if err := x.deleteTombstones(tx, cmd, t); err != nil { + return err + } + } + return nil +} + +func (x *Tombstones) deleteTombstones(tx *bbolt.Tx, _ *raft.Log, t *metastorev1.Tombstones) error { + var k tombstoneKey + if !k.set(t) { + return nil + } + e := x.delete(k) + if e == nil { + return nil + } + return x.store.DeleteTombstones(tx, e.TombstoneEntry) +} + +func (x *Tombstones) put(k tombstoneKey, v store.TombstoneEntry) bool { + if _, found := x.tombstones[k]; found { + return false + } + e := &tombstones{TombstoneEntry: v} + x.tombstones[k] = e + if v.Tombstones.Blocks != nil { + if x.queue.push(e) { + x.putBlockTombstones(v.Tombstones.Blocks) + return true + } + } + return false +} + +func (x *Tombstones) delete(k tombstoneKey) (t *tombstones) { + e, found := x.tombstones[k] + if !found { + return nil + } + delete(x.tombstones, k) + if t = x.queue.delete(e); t != nil { + if t.Tombstones.Blocks != nil { + x.deleteBlockTombstones(t.Blocks) + } + } + return t +} + +func (x *Tombstones) putBlockTombstones(t *metastorev1.BlockTombstones) { + bk := tenantBlockKey{ + tenant: t.Tenant, + shard: t.Shard, + } + m, ok := x.blocks[bk] + if !ok { + m = &tenantBlocks{blocks: make(map[string]struct{})} + x.blocks[bk] = m + } + for _, b := range t.Blocks { + m.blocks[b] = struct{}{} + } +} + +func (x *Tombstones) deleteBlockTombstones(t *metastorev1.BlockTombstones) { + bk := tenantBlockKey{ + tenant: t.Tenant, + shard: t.Shard, + } + m, found := x.blocks[bk] + if !found { + return + } + for _, b := range t.Blocks { + delete(m.blocks, b) + } +} + +func (x *Tombstones) Init(tx *bbolt.Tx) error { + return x.store.CreateBuckets(tx) +} + +func (x *Tombstones) Restore(tx *bbolt.Tx) error { + x.queue = newTombstoneQueue() + clear(x.tombstones) + clear(x.blocks) + entries := x.store.ListEntries(tx) + defer func() { + _ = entries.Close() + }() + for entries.Next() { + var k tombstoneKey + if v := entries.At(); k.set(v.Tombstones) { + x.put(k, v) + } + } + return entries.Err() +} diff --git a/pkg/experiment/query_backend/block/compaction.go b/pkg/experiment/query_backend/block/compaction.go index 582215e2d1..7289826b11 100644 --- a/pkg/experiment/query_backend/block/compaction.go +++ b/pkg/experiment/query_backend/block/compaction.go @@ -10,7 +10,6 @@ import ( "sort" "strings" "sync" - "time" "github.com/grafana/dskit/multierror" "github.com/oklog/ulid" @@ -23,7 +22,6 @@ import ( phlaremodel "github.com/grafana/pyroscope/pkg/model" "github.com/grafana/pyroscope/pkg/objstore" "github.com/grafana/pyroscope/pkg/phlaredb/block" - schemav1 "github.com/grafana/pyroscope/pkg/phlaredb/schemas/v1" "github.com/grafana/pyroscope/pkg/phlaredb/symdb" "github.com/grafana/pyroscope/pkg/phlaredb/tsdb/index" 
"github.com/grafana/pyroscope/pkg/util" @@ -108,21 +106,23 @@ func PlanCompaction(objects Objects) ([]*CompactionPlan, error) { } r := objects[0] - var c uint32 + var level uint32 for _, obj := range objects { if r.meta.Shard != obj.meta.Shard { return nil, ErrShardMergeMismatch } - c = max(c, obj.meta.CompactionLevel) + level = max(level, obj.meta.CompactionLevel) } - c++ + level++ + // Assuming that the first block in the job is the oldest one. + timestamp := ulid.MustParse(r.meta.Id).Time() m := make(map[string]*CompactionPlan) for _, obj := range objects { for _, s := range obj.meta.Datasets { tm, ok := m[s.TenantId] if !ok { - tm = newBlockCompaction(s.TenantId, r.meta.Shard, c) + tm = newBlockCompaction(timestamp, s.TenantId, r.meta.Shard, level) m[s.TenantId] = tm } sm := tm.addDataset(s) @@ -152,14 +152,14 @@ type CompactionPlan struct { meta *metastorev1.BlockMeta } -func newBlockCompaction(tenantID string, shard uint32, compactionLevel uint32) *CompactionPlan { +func newBlockCompaction(unixMilli uint64, tenantID string, shard uint32, compactionLevel uint32) *CompactionPlan { return &CompactionPlan{ tenantID: tenantID, datasetMap: make(map[string]*datasetCompaction), meta: &metastorev1.BlockMeta{ FormatVersion: 1, // TODO(kolesnikovae): Make it deterministic? - Id: ulid.MustNew(uint64(time.Now().UnixMilli()), rand.Reader).String(), + Id: ulid.MustNew(unixMilli, rand.Reader).String(), TenantId: tenantID, Shard: shard, CompactionLevel: compactionLevel, @@ -171,10 +171,6 @@ func newBlockCompaction(tenantID string, shard uint32, compactionLevel uint32) * } } -func (b *CompactionPlan) Estimate() { - // TODO(kolesnikovae): Implement. -} - func (b *CompactionPlan) Compact(ctx context.Context, dst objstore.Bucket, tmpdir string) (m *metastorev1.BlockMeta, err error) { w := NewBlockWriter(dst, ObjectPath(b.meta), tmpdir) defer func() { @@ -182,8 +178,6 @@ func (b *CompactionPlan) Compact(ctx context.Context, dst objstore.Bucket, tmpdi }() // Datasets are compacted in a strict order. for _, s := range b.datasets { - s.estimate() - // TODO(kolesnikovae): Wait until the required resources are available? if err = s.compact(ctx, w); err != nil { return nil, fmt.Errorf("compacting block: %w", err) } @@ -212,29 +206,6 @@ func (b *CompactionPlan) addDataset(s *metastorev1.Dataset) *datasetCompaction { return sm } -type compactionEstimates struct { - inMemorySizeInputSymbols int64 - inMemorySizeInputIndex int64 - inMemorySizeInputProfiles int64 - - inMemorySizeOutputSymbols int64 - inMemorySizeOutputIndex int64 - inMemorySizeOutputProfiles int64 - - outputSizeIndex int64 - outputSizeSymbols int64 - outputSizeProfiles int64 -} - -func (m *compactionEstimates) inMemorySizeTotal() int64 { - return m.inMemorySizeInputSymbols + - m.inMemorySizeInputIndex + - m.inMemorySizeInputProfiles + - m.inMemorySizeOutputSymbols + - m.inMemorySizeOutputIndex + - m.inMemorySizeOutputProfiles -} - type datasetCompaction struct { meta *metastorev1.Dataset ptypes map[string]struct{} @@ -246,10 +217,9 @@ type datasetCompaction struct { symbolsRewriter *symbolsRewriter profilesWriter *profilesWriter - estimates compactionEstimates - samples uint64 - series uint64 - profiles uint64 + samples uint64 + series uint64 + profiles uint64 flushOnce sync.Once } @@ -300,56 +270,6 @@ func (m *datasetCompaction) compact(ctx context.Context, w *Writer) (err error) return nil } -// TODO(kolesnikovae): -// - Add statistics to the block meta. -// - Measure. Ideally, we should track statistics. 
-func (m *datasetCompaction) estimate() { - columns := len(schemav1.ProfilesSchema.Columns()) - // Datasets are to be opened concurrently. - for _, s := range m.datasets { - s1 := s.sectionSize(SectionSymbols) - // It's likely that both symbols and tsdb sections will - // be heavily deduplicated, so the actual output size will - // be smaller than we estimate – to be deduced later. - m.estimates.outputSizeSymbols += s1 - // Both the symbols and the tsdb are loaded into memory entirely. - // It's multiplied here according to experiments. - // https://gist.github.com/kolesnikovae/6f7bdc0b8a14174a8e63485300144b4a - m.estimates.inMemorySizeInputSymbols += s1 * 3 // Pessimistic estimate. - - s2 := s.sectionSize(SectionTSDB) - m.estimates.outputSizeIndex += s2 - // TSDB index is loaded into memory entirely, but is not decoded. - m.estimates.inMemorySizeInputIndex += int64(nextPowerOfTwo(uint32(s2))) - - s3 := s.sectionSize(SectionProfiles) - m.estimates.outputSizeProfiles += s3 - // All columns are to be opened. - // Assuming async read mode – 2 buffers per column: - m.estimates.inMemorySizeInputProfiles += int64(2 * columns * estimateReadBufferSize(s3)) - } - const symbolsDuplicationRatio = 0.5 // Two blocks are likely to have a half of symbols in common. - m.estimates.outputSizeSymbols = int64(float64(m.estimates.outputSizeSymbols) * symbolsDuplicationRatio) - // Duplication of series and profiles is ignored. - - // Output block memory footprint. - m.estimates.inMemorySizeOutputIndex = m.estimates.outputSizeIndex * 8 // A guess. We keep all labels in memory. - m.estimates.inMemorySizeOutputSymbols += m.estimates.outputSizeProfiles * 4 // Mind the lookup table of rewriter. - // This is the most difficult part to estimate. - // Parquet keeps ALL RG pages in memory. We have a limit of 10K rows per RG, - // therefore it's very likely, that the whole table will be loaded into memory, - // plus overhead of memory fragmentation. It's likely impossible to have a - // reasonable estimate here. - const rowSizeGuess = 2 << 10 - // Worst case should be appx ~32MB. If a doubled estimated output size is less than that, use it. 
- columnBuffers := int64(nextPowerOfTwo(maxRowsPerRowGroup * rowSizeGuess)) - if s := 2 * m.estimates.outputSizeProfiles; s < columnBuffers { - columnBuffers = s - } - pageBuffers := int64(columns * estimatePageBufferSize(m.estimates.outputSizeProfiles)) - m.estimates.inMemorySizeOutputProfiles += columnBuffers + pageBuffers -} - func (m *datasetCompaction) open(ctx context.Context, path string) (err error) { m.path = path defer func() { @@ -362,7 +282,12 @@ func (m *datasetCompaction) open(ctx context.Context, path string) (err error) { return err } - m.profilesWriter, err = newProfileWriter(m.path, m.estimates.outputSizeProfiles) + var estimatedProfileTableSize int64 + for _, ds := range m.datasets { + estimatedProfileTableSize += ds.sectionSize(SectionProfiles) + } + pageBufferSize := estimatePageBufferSize(estimatedProfileTableSize) + m.profilesWriter, err = newProfileWriter(m.path, pageBufferSize) if err != nil { return err } diff --git a/pkg/experiment/query_backend/block/compaction_test.go b/pkg/experiment/query_backend/block/compaction_test.go index fa5d64ff55..e2c9ac8c32 100644 --- a/pkg/experiment/query_backend/block/compaction_test.go +++ b/pkg/experiment/query_backend/block/compaction_test.go @@ -17,14 +17,14 @@ func Test_CompactBlocks(t *testing.T) { ctx := context.Background() bucket, _ := testutil.NewFilesystemBucket(t, ctx, "testdata") - var blockMetas metastorev1.CompletedJob // same contract, can break in the future - blockMetasData, err := os.ReadFile("testdata/block-metas.json") + var resp metastorev1.GetBlockMetadataResponse + raw, err := os.ReadFile("testdata/block-metas.json") require.NoError(t, err) - err = protojson.Unmarshal(blockMetasData, &blockMetas) + err = protojson.Unmarshal(raw, &resp) require.NoError(t, err) dst, tempdir := testutil.NewFilesystemBucket(t, ctx, t.TempDir()) - compactedBlocks, err := Compact(ctx, blockMetas.Blocks, bucket, + compactedBlocks, err := Compact(ctx, resp.Blocks, bucket, WithCompactionDestination(dst), WithCompactionTempDir(tempdir), WithCompactionObjectOptions( diff --git a/pkg/experiment/query_backend/block/object.go b/pkg/experiment/query_backend/block/object.go index 738c7ea2da..95f2e0a81e 100644 --- a/pkg/experiment/query_backend/block/object.go +++ b/pkg/experiment/query_backend/block/object.go @@ -109,19 +109,23 @@ func NewObject(storage objstore.Bucket, meta *metastorev1.BlockMeta, opts ...Obj } func ObjectPath(md *metastorev1.BlockMeta) string { + return BuildObjectPath(md.TenantId, md.Shard, md.CompactionLevel, md.Id) +} + +func BuildObjectPath(tenant string, shard uint32, level uint32, block string) string { topLevel := DirPathBlock - tenantDirName := md.TenantId - if md.CompactionLevel == 0 { + tenantDirName := tenant + if level == 0 { topLevel = DirPathSegment tenantDirName = DirNameAnonTenant } var b strings.Builder b.WriteString(topLevel) - b.WriteString(strconv.Itoa(int(md.Shard))) + b.WriteString(strconv.Itoa(int(shard))) b.WriteByte('/') b.WriteString(tenantDirName) b.WriteByte('/') - b.WriteString(md.Id) + b.WriteString(block) b.WriteByte('/') b.WriteString(FileNameDataObject) return b.String() diff --git a/pkg/experiment/query_backend/block/section_profiles.go b/pkg/experiment/query_backend/block/section_profiles.go index 031bcbf78b..3ec4bda74d 100644 --- a/pkg/experiment/query_backend/block/section_profiles.go +++ b/pkg/experiment/query_backend/block/section_profiles.go @@ -164,7 +164,7 @@ type profilesWriter struct { profiles uint64 } -func newProfileWriter(dst string, sizeTotal int64) (*profilesWriter, 
error) { +func newProfileWriter(dst string, pageBufferSize int) (*profilesWriter, error) { f, err := os.Create(filepath.Join(dst, FileNameProfilesParquet)) if err != nil { return nil, err @@ -174,7 +174,7 @@ func newProfileWriter(dst string, sizeTotal int64) (*profilesWriter, error) { buf: make([]parquet.Row, 1), GenericWriter: parquet.NewGenericWriter[*schemav1.Profile](f, parquet.CreatedBy("github.com/grafana/pyroscope/", build.Version, build.Revision), - parquet.PageBufferSize(estimatePageBufferSize(sizeTotal)), + parquet.PageBufferSize(pageBufferSize), // Note that parquet keeps ALL RG pages in memory (ColumnPageBuffers). parquet.MaxRowsPerRowGroup(maxRowsPerRowGroup), schemav1.ProfilesSchema, diff --git a/pkg/experiment/query_backend/block_reader.go b/pkg/experiment/query_backend/block_reader.go index 4148ff5483..5a8a5326f7 100644 --- a/pkg/experiment/query_backend/block_reader.go +++ b/pkg/experiment/query_backend/block_reader.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/opentracing/opentracing-go" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" @@ -28,7 +29,7 @@ import ( // // A single Invoke request typically spans multiple blocks (objects). // Querying an object involves processing multiple datasets in parallel. -// Multiple parallel queries can be executed on the same tenant datasets. +// Multiple parallel queries can be executed on the same tenant dataset. // // Thus, queries share the same "execution context": the object and a tenant // dataset. @@ -68,32 +69,41 @@ func (b *BlockReader) Invoke( ) (*queryv1.InvokeResponse, error) { span, ctx := opentracing.StartSpanFromContext(ctx, "BlockReader.Invoke") defer span.Finish() - vr, err := validateRequest(req) + r, err := validateRequest(req) if err != nil { return nil, status.Errorf(codes.InvalidArgument, "request validation failed: %v", err) } + g, ctx := errgroup.WithContext(ctx) - m := newAggregator(req) + agg := newAggregator(req) + + qcs := make([]*queryContext, 0, len(req.Query)*len(req.QueryPlan.Root.Blocks)) for _, md := range req.QueryPlan.Root.Blocks { - obj := block.NewObject(b.storage, md) - for _, meta := range md.Datasets { - c := newQueryContext(ctx, b.log, meta, vr, obj) - for _, query := range req.Query { - q := query - g.Go(util.RecoverPanic(func() error { - r, err := executeQuery(c, q) - if err != nil { - return err - } - return m.aggregateReport(r) - })) - } + object := block.NewObject(b.storage, md) + for _, ds := range md.Datasets { + dataset := block.NewDataset(ds, object) + qcs = append(qcs, newQueryContext(ctx, b.log, r, agg, dataset)) + } + } + + for _, c := range qcs { + for _, query := range req.Query { + q := query + g.Go(util.RecoverPanic(func() error { + execErr := executeQuery(c, q) + if execErr != nil && objstore.IsNotExist(b.storage, execErr) { + level.Warn(b.log).Log("msg", "object not found", "err", execErr) + return nil + } + return execErr + })) } } + if err = g.Wait(); err != nil { return nil, err } - return m.response() + return agg.response() } type request struct { diff --git a/pkg/experiment/query_backend/query.go b/pkg/experiment/query_backend/query.go index 6676713a87..242037ca75 100644 --- a/pkg/experiment/query_backend/query.go +++ b/pkg/experiment/query_backend/query.go @@ -9,7 +9,6 @@ import ( "github.com/iancoleman/strcase" "github.com/opentracing/opentracing-go" - metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" queryv1 
"github.com/grafana/pyroscope/api/gen/proto/go/query/v1" "github.com/grafana/pyroscope/pkg/experiment/query_backend/block" ) @@ -72,50 +71,53 @@ func registerQueryType( } type queryContext struct { - ctx context.Context - log log.Logger - meta *metastorev1.Dataset - req *request - obj *block.Object - ds *block.Dataset - err error + ctx context.Context + log log.Logger + req *request + agg *reportAggregator + ds *block.Dataset + err error } func newQueryContext( ctx context.Context, - logger log.Logger, - meta *metastorev1.Dataset, + log log.Logger, req *request, - obj *block.Object, + agg *reportAggregator, + ds *block.Dataset, ) *queryContext { return &queryContext{ - ctx: ctx, - log: logger, - req: req, - meta: meta, - obj: obj, - ds: block.NewDataset(meta, obj), + ctx: ctx, + log: log, + req: req, + agg: agg, + ds: ds, } } -func executeQuery(q *queryContext, query *queryv1.Query) (r *queryv1.Report, err error) { +func executeQuery(q *queryContext, query *queryv1.Query) error { var span opentracing.Span span, q.ctx = opentracing.StartSpanFromContext(q.ctx, "executeQuery."+strcase.ToCamel(query.QueryType.String())) defer span.Finish() handle, err := getQueryHandler(query.QueryType) if err != nil { - return nil, err + return err } if err = q.open(); err != nil { - return nil, fmt.Errorf("failed to initialize query context: %w", err) + return fmt.Errorf("failed to initialize query context: %w", err) } defer func() { _ = q.close(err) }() - if r, err = handle(q, query); r != nil { + r, err := handle(q, query) + if err != nil { + return err + } + if r != nil { r.ReportType = QueryReportType(query.QueryType) + return q.agg.aggregateReport(r) } - return r, err + return nil } func (q *queryContext) open() error { diff --git a/pkg/objstore/not_found.go b/pkg/objstore/not_found.go new file mode 100644 index 0000000000..c9821a7552 --- /dev/null +++ b/pkg/objstore/not_found.go @@ -0,0 +1,23 @@ +package objstore + +import ( + "errors" + + "github.com/thanos-io/objstore" +) + +func IsNotExist(b objstore.BucketReader, err error) bool { + // objstore relies on the Causer interface + // and does not understand wrapped errors. 
+ return b.IsObjNotFoundErr(UnwrapErr(err)) +} + +func UnwrapErr(err error) error { + for { + unwrapped := errors.Unwrap(err) + if unwrapped == nil { + return err + } + err = unwrapped + } +} diff --git a/pkg/phlare/modules_experimental.go b/pkg/phlare/modules_experimental.go index beb1e9f7fb..7d7b83d057 100644 --- a/pkg/phlare/modules_experimental.go +++ b/pkg/phlare/modules_experimental.go @@ -86,21 +86,20 @@ func (f *Phlare) initSegmentWriterClient() (_ services.Service, err error) { } func (f *Phlare) initCompactionWorker() (svc services.Service, err error) { - if err = f.Cfg.CompactionWorker.Validate(); err != nil { - return nil, err - } logger := log.With(f.logger, "component", "compaction-worker") - f.compactionWorker, err = compactionworker.New( - f.Cfg.CompactionWorker, + registerer := prometheus.WrapRegistererWithPrefix("pyroscope_compaction_worker_", f.reg) + w, err := compactionworker.New( logger, + f.Cfg.CompactionWorker, f.metastoreClient, f.storageBucket, - f.reg, + registerer, ) if err != nil { return nil, err } - return f.compactionWorker, nil + f.compactionWorker = w + return w.Service(), nil } func (f *Phlare) initMetastore() (services.Service, error) { @@ -110,10 +109,11 @@ func (f *Phlare) initMetastore() (services.Service, error) { logger := log.With(f.logger, "component", "metastore") healthService := health.NewGRPCHealthService(f.healthServer, logger, "pyroscope.metastore") + registerer := prometheus.WrapRegistererWithPrefix("pyroscope_metastore_", f.reg) m, err := metastore.New( f.Cfg.Metastore, logger, - f.reg, + registerer, healthService, f.metastoreClient, f.storageBucket, diff --git a/pkg/test/boltdb.go b/pkg/test/boltdb.go new file mode 100644 index 0000000000..abbfc1fe6a --- /dev/null +++ b/pkg/test/boltdb.go @@ -0,0 +1,23 @@ +package test + +import ( + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + "go.etcd.io/bbolt" +) + +func BoltDB(t *testing.T) *bbolt.DB { + tempDir := t.TempDir() + opts := bbolt.Options{ + NoGrowSync: true, + NoFreelistSync: true, + FreelistType: bbolt.FreelistMapType, + InitialMmapSize: 32 << 20, + NoSync: true, + } + db, err := bbolt.Open(filepath.Join(tempDir, "boltdb"), 0644, &opts) + require.NoError(t, err) + return db +} diff --git a/pkg/test/idempotence.go b/pkg/test/idempotence.go new file mode 100644 index 0000000000..de69cdfbff --- /dev/null +++ b/pkg/test/idempotence.go @@ -0,0 +1,34 @@ +package test + +import ( + "testing" +) + +// AssertIdempotent asserts that the test is valid when run multiple times. +func AssertIdempotent(t *testing.T, fn func(*testing.T)) { + t.Helper() + for i := 0; i < 2; i++ { + fn(t) + if t.Failed() { + if i > 0 { + t.Fatal("the function is not idempotent") + } + return + } + } +} + +func AssertIdempotentSubtest(t *testing.T, fn func(*testing.T)) func(*testing.T) { + t.Helper() + return func(t *testing.T) { + for i := 0; i < 2; i++ { + fn(t) + if t.Failed() { + if i > 0 { + t.Fatal("the function is not idempotent") + } + return + } + } + } +} diff --git a/pkg/test/mocks/mockcompactor/mock_block_queue_store.go b/pkg/test/mocks/mockcompactor/mock_block_queue_store.go new file mode 100644 index 0000000000..91fd024a5e --- /dev/null +++ b/pkg/test/mocks/mockcompactor/mock_block_queue_store.go @@ -0,0 +1,229 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mockcompactor + +import ( + bbolt "go.etcd.io/bbolt" + + iter "github.com/grafana/pyroscope/pkg/iter" + + mock "github.com/stretchr/testify/mock" + + store "github.com/grafana/pyroscope/pkg/experiment/metastore/compaction/compactor/store" +) + +// MockBlockQueueStore is an autogenerated mock type for the BlockQueueStore type +type MockBlockQueueStore struct { + mock.Mock +} + +type MockBlockQueueStore_Expecter struct { + mock *mock.Mock +} + +func (_m *MockBlockQueueStore) EXPECT() *MockBlockQueueStore_Expecter { + return &MockBlockQueueStore_Expecter{mock: &_m.Mock} +} + +// CreateBuckets provides a mock function with given fields: _a0 +func (_m *MockBlockQueueStore) CreateBuckets(_a0 *bbolt.Tx) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for CreateBuckets") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*bbolt.Tx) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockBlockQueueStore_CreateBuckets_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateBuckets' +type MockBlockQueueStore_CreateBuckets_Call struct { + *mock.Call +} + +// CreateBuckets is a helper method to define mock.On call +// - _a0 *bbolt.Tx +func (_e *MockBlockQueueStore_Expecter) CreateBuckets(_a0 interface{}) *MockBlockQueueStore_CreateBuckets_Call { + return &MockBlockQueueStore_CreateBuckets_Call{Call: _e.mock.On("CreateBuckets", _a0)} +} + +func (_c *MockBlockQueueStore_CreateBuckets_Call) Run(run func(_a0 *bbolt.Tx)) *MockBlockQueueStore_CreateBuckets_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*bbolt.Tx)) + }) + return _c +} + +func (_c *MockBlockQueueStore_CreateBuckets_Call) Return(_a0 error) *MockBlockQueueStore_CreateBuckets_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockBlockQueueStore_CreateBuckets_Call) RunAndReturn(run func(*bbolt.Tx) error) *MockBlockQueueStore_CreateBuckets_Call { + _c.Call.Return(run) + return _c +} + +// DeleteEntry provides a mock function with given fields: tx, index, id +func (_m *MockBlockQueueStore) DeleteEntry(tx *bbolt.Tx, index uint64, id string) error { + ret := _m.Called(tx, index, id) + + if len(ret) == 0 { + panic("no return value specified for DeleteEntry") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*bbolt.Tx, uint64, string) error); ok { + r0 = rf(tx, index, id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockBlockQueueStore_DeleteEntry_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteEntry' +type MockBlockQueueStore_DeleteEntry_Call struct { + *mock.Call +} + +// DeleteEntry is a helper method to define mock.On call +// - tx *bbolt.Tx +// - index uint64 +// - id string +func (_e *MockBlockQueueStore_Expecter) DeleteEntry(tx interface{}, index interface{}, id interface{}) *MockBlockQueueStore_DeleteEntry_Call { + return &MockBlockQueueStore_DeleteEntry_Call{Call: _e.mock.On("DeleteEntry", tx, index, id)} +} + +func (_c *MockBlockQueueStore_DeleteEntry_Call) Run(run func(tx *bbolt.Tx, index uint64, id string)) *MockBlockQueueStore_DeleteEntry_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*bbolt.Tx), args[1].(uint64), args[2].(string)) + }) + return _c +} + +func (_c *MockBlockQueueStore_DeleteEntry_Call) Return(_a0 error) *MockBlockQueueStore_DeleteEntry_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockBlockQueueStore_DeleteEntry_Call) RunAndReturn(run func(*bbolt.Tx, uint64, string) error) 
*MockBlockQueueStore_DeleteEntry_Call { + _c.Call.Return(run) + return _c +} + +// ListEntries provides a mock function with given fields: _a0 +func (_m *MockBlockQueueStore) ListEntries(_a0 *bbolt.Tx) iter.Iterator[store.BlockEntry] { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for ListEntries") + } + + var r0 iter.Iterator[store.BlockEntry] + if rf, ok := ret.Get(0).(func(*bbolt.Tx) iter.Iterator[store.BlockEntry]); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(iter.Iterator[store.BlockEntry]) + } + } + + return r0 +} + +// MockBlockQueueStore_ListEntries_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEntries' +type MockBlockQueueStore_ListEntries_Call struct { + *mock.Call +} + +// ListEntries is a helper method to define mock.On call +// - _a0 *bbolt.Tx +func (_e *MockBlockQueueStore_Expecter) ListEntries(_a0 interface{}) *MockBlockQueueStore_ListEntries_Call { + return &MockBlockQueueStore_ListEntries_Call{Call: _e.mock.On("ListEntries", _a0)} +} + +func (_c *MockBlockQueueStore_ListEntries_Call) Run(run func(_a0 *bbolt.Tx)) *MockBlockQueueStore_ListEntries_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*bbolt.Tx)) + }) + return _c +} + +func (_c *MockBlockQueueStore_ListEntries_Call) Return(_a0 iter.Iterator[store.BlockEntry]) *MockBlockQueueStore_ListEntries_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockBlockQueueStore_ListEntries_Call) RunAndReturn(run func(*bbolt.Tx) iter.Iterator[store.BlockEntry]) *MockBlockQueueStore_ListEntries_Call { + _c.Call.Return(run) + return _c +} + +// StoreEntry provides a mock function with given fields: _a0, _a1 +func (_m *MockBlockQueueStore) StoreEntry(_a0 *bbolt.Tx, _a1 store.BlockEntry) error { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for StoreEntry") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*bbolt.Tx, store.BlockEntry) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockBlockQueueStore_StoreEntry_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StoreEntry' +type MockBlockQueueStore_StoreEntry_Call struct { + *mock.Call +} + +// StoreEntry is a helper method to define mock.On call +// - _a0 *bbolt.Tx +// - _a1 store.BlockEntry +func (_e *MockBlockQueueStore_Expecter) StoreEntry(_a0 interface{}, _a1 interface{}) *MockBlockQueueStore_StoreEntry_Call { + return &MockBlockQueueStore_StoreEntry_Call{Call: _e.mock.On("StoreEntry", _a0, _a1)} +} + +func (_c *MockBlockQueueStore_StoreEntry_Call) Run(run func(_a0 *bbolt.Tx, _a1 store.BlockEntry)) *MockBlockQueueStore_StoreEntry_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*bbolt.Tx), args[1].(store.BlockEntry)) + }) + return _c +} + +func (_c *MockBlockQueueStore_StoreEntry_Call) Return(_a0 error) *MockBlockQueueStore_StoreEntry_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockBlockQueueStore_StoreEntry_Call) RunAndReturn(run func(*bbolt.Tx, store.BlockEntry) error) *MockBlockQueueStore_StoreEntry_Call { + _c.Call.Return(run) + return _c +} + +// NewMockBlockQueueStore creates a new instance of MockBlockQueueStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockBlockQueueStore(t interface { + mock.TestingT + Cleanup(func()) +}) *MockBlockQueueStore { + mock := &MockBlockQueueStore{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/test/mocks/mockcompactor/mock_tombstones.go b/pkg/test/mocks/mockcompactor/mock_tombstones.go new file mode 100644 index 0000000000..00f54ec258 --- /dev/null +++ b/pkg/test/mocks/mockcompactor/mock_tombstones.go @@ -0,0 +1,86 @@ +// Code generated by mockery. DO NOT EDIT. + +package mockcompactor + +import ( + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + iter "github.com/grafana/pyroscope/pkg/iter" + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// MockTombstones is an autogenerated mock type for the Tombstones type +type MockTombstones struct { + mock.Mock +} + +type MockTombstones_Expecter struct { + mock *mock.Mock +} + +func (_m *MockTombstones) EXPECT() *MockTombstones_Expecter { + return &MockTombstones_Expecter{mock: &_m.Mock} +} + +// ListTombstones provides a mock function with given fields: before +func (_m *MockTombstones) ListTombstones(before time.Time) iter.Iterator[*metastorev1.Tombstones] { + ret := _m.Called(before) + + if len(ret) == 0 { + panic("no return value specified for ListTombstones") + } + + var r0 iter.Iterator[*metastorev1.Tombstones] + if rf, ok := ret.Get(0).(func(time.Time) iter.Iterator[*metastorev1.Tombstones]); ok { + r0 = rf(before) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(iter.Iterator[*metastorev1.Tombstones]) + } + } + + return r0 +} + +// MockTombstones_ListTombstones_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListTombstones' +type MockTombstones_ListTombstones_Call struct { + *mock.Call +} + +// ListTombstones is a helper method to define mock.On call +// - before time.Time +func (_e *MockTombstones_Expecter) ListTombstones(before interface{}) *MockTombstones_ListTombstones_Call { + return &MockTombstones_ListTombstones_Call{Call: _e.mock.On("ListTombstones", before)} +} + +func (_c *MockTombstones_ListTombstones_Call) Run(run func(before time.Time)) *MockTombstones_ListTombstones_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(time.Time)) + }) + return _c +} + +func (_c *MockTombstones_ListTombstones_Call) Return(_a0 iter.Iterator[*metastorev1.Tombstones]) *MockTombstones_ListTombstones_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockTombstones_ListTombstones_Call) RunAndReturn(run func(time.Time) iter.Iterator[*metastorev1.Tombstones]) *MockTombstones_ListTombstones_Call { + _c.Call.Return(run) + return _c +} + +// NewMockTombstones creates a new instance of MockTombstones. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockTombstones(t interface { + mock.TestingT + Cleanup(func()) +}) *MockTombstones { + mock := &MockTombstones{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/test/mocks/mockindex/mock_store.go b/pkg/test/mocks/mockindex/mock_store.go index 5f5ac426ae..c6fdf16abf 100644 --- a/pkg/test/mocks/mockindex/mock_store.go +++ b/pkg/test/mocks/mockindex/mock_store.go @@ -3,11 +3,13 @@ package mockindex import ( - metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" - index "github.com/grafana/pyroscope/pkg/experiment/metastore/index" bbolt "go.etcd.io/bbolt" + metastorev1 "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1" + mock "github.com/stretchr/testify/mock" + + store "github.com/grafana/pyroscope/pkg/experiment/metastore/index/store" ) // MockStore is an autogenerated mock type for the Store type @@ -23,8 +25,102 @@ func (_m *MockStore) EXPECT() *MockStore_Expecter { return &MockStore_Expecter{mock: &_m.Mock} } +// CreateBuckets provides a mock function with given fields: _a0 +func (_m *MockStore) CreateBuckets(_a0 *bbolt.Tx) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for CreateBuckets") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*bbolt.Tx) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockStore_CreateBuckets_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateBuckets' +type MockStore_CreateBuckets_Call struct { + *mock.Call +} + +// CreateBuckets is a helper method to define mock.On call +// - _a0 *bbolt.Tx +func (_e *MockStore_Expecter) CreateBuckets(_a0 interface{}) *MockStore_CreateBuckets_Call { + return &MockStore_CreateBuckets_Call{Call: _e.mock.On("CreateBuckets", _a0)} +} + +func (_c *MockStore_CreateBuckets_Call) Run(run func(_a0 *bbolt.Tx)) *MockStore_CreateBuckets_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*bbolt.Tx)) + }) + return _c +} + +func (_c *MockStore_CreateBuckets_Call) Return(_a0 error) *MockStore_CreateBuckets_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockStore_CreateBuckets_Call) RunAndReturn(run func(*bbolt.Tx) error) *MockStore_CreateBuckets_Call { + _c.Call.Return(run) + return _c +} + +// DeleteBlockList provides a mock function with given fields: _a0, _a1, _a2 +func (_m *MockStore) DeleteBlockList(_a0 *bbolt.Tx, _a1 store.PartitionKey, _a2 *metastorev1.BlockList) error { + ret := _m.Called(_a0, _a1, _a2) + + if len(ret) == 0 { + panic("no return value specified for DeleteBlockList") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*bbolt.Tx, store.PartitionKey, *metastorev1.BlockList) error); ok { + r0 = rf(_a0, _a1, _a2) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockStore_DeleteBlockList_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteBlockList' +type MockStore_DeleteBlockList_Call struct { + *mock.Call +} + +// DeleteBlockList is a helper method to define mock.On call +// - _a0 *bbolt.Tx +// - _a1 store.PartitionKey +// - _a2 *metastorev1.BlockList +func (_e *MockStore_Expecter) DeleteBlockList(_a0 interface{}, _a1 interface{}, _a2 interface{}) *MockStore_DeleteBlockList_Call { + return &MockStore_DeleteBlockList_Call{Call: _e.mock.On("DeleteBlockList", _a0, _a1, _a2)} +} + +func (_c *MockStore_DeleteBlockList_Call) Run(run func(_a0 *bbolt.Tx, _a1 store.PartitionKey, _a2 *metastorev1.BlockList)) 
*MockStore_DeleteBlockList_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*bbolt.Tx), args[1].(store.PartitionKey), args[2].(*metastorev1.BlockList)) + }) + return _c +} + +func (_c *MockStore_DeleteBlockList_Call) Return(_a0 error) *MockStore_DeleteBlockList_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockStore_DeleteBlockList_Call) RunAndReturn(run func(*bbolt.Tx, store.PartitionKey, *metastorev1.BlockList) error) *MockStore_DeleteBlockList_Call { + _c.Call.Return(run) + return _c +} + // ListBlocks provides a mock function with given fields: tx, p, shard, tenant -func (_m *MockStore) ListBlocks(tx *bbolt.Tx, p index.PartitionKey, shard uint32, tenant string) []*metastorev1.BlockMeta { +func (_m *MockStore) ListBlocks(tx *bbolt.Tx, p store.PartitionKey, shard uint32, tenant string) []*metastorev1.BlockMeta { ret := _m.Called(tx, p, shard, tenant) if len(ret) == 0 { @@ -32,7 +128,7 @@ func (_m *MockStore) ListBlocks(tx *bbolt.Tx, p index.PartitionKey, shard uint32 } var r0 []*metastorev1.BlockMeta - if rf, ok := ret.Get(0).(func(*bbolt.Tx, index.PartitionKey, uint32, string) []*metastorev1.BlockMeta); ok { + if rf, ok := ret.Get(0).(func(*bbolt.Tx, store.PartitionKey, uint32, string) []*metastorev1.BlockMeta); ok { r0 = rf(tx, p, shard, tenant) } else { if ret.Get(0) != nil { @@ -50,16 +146,16 @@ type MockStore_ListBlocks_Call struct { // ListBlocks is a helper method to define mock.On call // - tx *bbolt.Tx -// - p index.PartitionKey +// - p store.PartitionKey // - shard uint32 // - tenant string func (_e *MockStore_Expecter) ListBlocks(tx interface{}, p interface{}, shard interface{}, tenant interface{}) *MockStore_ListBlocks_Call { return &MockStore_ListBlocks_Call{Call: _e.mock.On("ListBlocks", tx, p, shard, tenant)} } -func (_c *MockStore_ListBlocks_Call) Run(run func(tx *bbolt.Tx, p index.PartitionKey, shard uint32, tenant string)) *MockStore_ListBlocks_Call { +func (_c *MockStore_ListBlocks_Call) Run(run func(tx *bbolt.Tx, p store.PartitionKey, shard uint32, tenant string)) *MockStore_ListBlocks_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(*bbolt.Tx), args[1].(index.PartitionKey), args[2].(uint32), args[3].(string)) + run(args[0].(*bbolt.Tx), args[1].(store.PartitionKey), args[2].(uint32), args[3].(string)) }) return _c } @@ -69,25 +165,25 @@ func (_c *MockStore_ListBlocks_Call) Return(_a0 []*metastorev1.BlockMeta) *MockS return _c } -func (_c *MockStore_ListBlocks_Call) RunAndReturn(run func(*bbolt.Tx, index.PartitionKey, uint32, string) []*metastorev1.BlockMeta) *MockStore_ListBlocks_Call { +func (_c *MockStore_ListBlocks_Call) RunAndReturn(run func(*bbolt.Tx, store.PartitionKey, uint32, string) []*metastorev1.BlockMeta) *MockStore_ListBlocks_Call { _c.Call.Return(run) return _c } -// ListPartitions provides a mock function with given fields: tx -func (_m *MockStore) ListPartitions(tx *bbolt.Tx) []index.PartitionKey { - ret := _m.Called(tx) +// ListPartitions provides a mock function with given fields: _a0 +func (_m *MockStore) ListPartitions(_a0 *bbolt.Tx) []store.PartitionKey { + ret := _m.Called(_a0) if len(ret) == 0 { panic("no return value specified for ListPartitions") } - var r0 []index.PartitionKey - if rf, ok := ret.Get(0).(func(*bbolt.Tx) []index.PartitionKey); ok { - r0 = rf(tx) + var r0 []store.PartitionKey + if rf, ok := ret.Get(0).(func(*bbolt.Tx) []store.PartitionKey); ok { + r0 = rf(_a0) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]index.PartitionKey) + r0 = ret.Get(0).([]store.PartitionKey) } } @@ -100,39 
+196,39 @@ type MockStore_ListPartitions_Call struct { } // ListPartitions is a helper method to define mock.On call -// - tx *bbolt.Tx -func (_e *MockStore_Expecter) ListPartitions(tx interface{}) *MockStore_ListPartitions_Call { - return &MockStore_ListPartitions_Call{Call: _e.mock.On("ListPartitions", tx)} +// - _a0 *bbolt.Tx +func (_e *MockStore_Expecter) ListPartitions(_a0 interface{}) *MockStore_ListPartitions_Call { + return &MockStore_ListPartitions_Call{Call: _e.mock.On("ListPartitions", _a0)} } -func (_c *MockStore_ListPartitions_Call) Run(run func(tx *bbolt.Tx)) *MockStore_ListPartitions_Call { +func (_c *MockStore_ListPartitions_Call) Run(run func(_a0 *bbolt.Tx)) *MockStore_ListPartitions_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(*bbolt.Tx)) }) return _c } -func (_c *MockStore_ListPartitions_Call) Return(_a0 []index.PartitionKey) *MockStore_ListPartitions_Call { +func (_c *MockStore_ListPartitions_Call) Return(_a0 []store.PartitionKey) *MockStore_ListPartitions_Call { _c.Call.Return(_a0) return _c } -func (_c *MockStore_ListPartitions_Call) RunAndReturn(run func(*bbolt.Tx) []index.PartitionKey) *MockStore_ListPartitions_Call { +func (_c *MockStore_ListPartitions_Call) RunAndReturn(run func(*bbolt.Tx) []store.PartitionKey) *MockStore_ListPartitions_Call { _c.Call.Return(run) return _c } -// ListShards provides a mock function with given fields: tx, p -func (_m *MockStore) ListShards(tx *bbolt.Tx, p index.PartitionKey) []uint32 { - ret := _m.Called(tx, p) +// ListShards provides a mock function with given fields: _a0, _a1 +func (_m *MockStore) ListShards(_a0 *bbolt.Tx, _a1 store.PartitionKey) []uint32 { + ret := _m.Called(_a0, _a1) if len(ret) == 0 { panic("no return value specified for ListShards") } var r0 []uint32 - if rf, ok := ret.Get(0).(func(*bbolt.Tx, index.PartitionKey) []uint32); ok { - r0 = rf(tx, p) + if rf, ok := ret.Get(0).(func(*bbolt.Tx, store.PartitionKey) []uint32); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]uint32) @@ -148,15 +244,15 @@ type MockStore_ListShards_Call struct { } // ListShards is a helper method to define mock.On call -// - tx *bbolt.Tx -// - p index.PartitionKey -func (_e *MockStore_Expecter) ListShards(tx interface{}, p interface{}) *MockStore_ListShards_Call { - return &MockStore_ListShards_Call{Call: _e.mock.On("ListShards", tx, p)} +// - _a0 *bbolt.Tx +// - _a1 store.PartitionKey +func (_e *MockStore_Expecter) ListShards(_a0 interface{}, _a1 interface{}) *MockStore_ListShards_Call { + return &MockStore_ListShards_Call{Call: _e.mock.On("ListShards", _a0, _a1)} } -func (_c *MockStore_ListShards_Call) Run(run func(tx *bbolt.Tx, p index.PartitionKey)) *MockStore_ListShards_Call { +func (_c *MockStore_ListShards_Call) Run(run func(_a0 *bbolt.Tx, _a1 store.PartitionKey)) *MockStore_ListShards_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(*bbolt.Tx), args[1].(index.PartitionKey)) + run(args[0].(*bbolt.Tx), args[1].(store.PartitionKey)) }) return _c } @@ -166,13 +262,13 @@ func (_c *MockStore_ListShards_Call) Return(_a0 []uint32) *MockStore_ListShards_ return _c } -func (_c *MockStore_ListShards_Call) RunAndReturn(run func(*bbolt.Tx, index.PartitionKey) []uint32) *MockStore_ListShards_Call { +func (_c *MockStore_ListShards_Call) RunAndReturn(run func(*bbolt.Tx, store.PartitionKey) []uint32) *MockStore_ListShards_Call { _c.Call.Return(run) return _c } // ListTenants provides a mock function with given fields: tx, p, shard -func (_m *MockStore) ListTenants(tx *bbolt.Tx, p 
index.PartitionKey, shard uint32) []string { +func (_m *MockStore) ListTenants(tx *bbolt.Tx, p store.PartitionKey, shard uint32) []string { ret := _m.Called(tx, p, shard) if len(ret) == 0 { @@ -180,7 +276,7 @@ func (_m *MockStore) ListTenants(tx *bbolt.Tx, p index.PartitionKey, shard uint3 } var r0 []string - if rf, ok := ret.Get(0).(func(*bbolt.Tx, index.PartitionKey, uint32) []string); ok { + if rf, ok := ret.Get(0).(func(*bbolt.Tx, store.PartitionKey, uint32) []string); ok { r0 = rf(tx, p, shard) } else { if ret.Get(0) != nil { @@ -198,15 +294,15 @@ type MockStore_ListTenants_Call struct { // ListTenants is a helper method to define mock.On call // - tx *bbolt.Tx -// - p index.PartitionKey +// - p store.PartitionKey // - shard uint32 func (_e *MockStore_Expecter) ListTenants(tx interface{}, p interface{}, shard interface{}) *MockStore_ListTenants_Call { return &MockStore_ListTenants_Call{Call: _e.mock.On("ListTenants", tx, p, shard)} } -func (_c *MockStore_ListTenants_Call) Run(run func(tx *bbolt.Tx, p index.PartitionKey, shard uint32)) *MockStore_ListTenants_Call { +func (_c *MockStore_ListTenants_Call) Run(run func(tx *bbolt.Tx, p store.PartitionKey, shard uint32)) *MockStore_ListTenants_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(*bbolt.Tx), args[1].(index.PartitionKey), args[2].(uint32)) + run(args[0].(*bbolt.Tx), args[1].(store.PartitionKey), args[2].(uint32)) }) return _c } @@ -216,7 +312,55 @@ func (_c *MockStore_ListTenants_Call) Return(_a0 []string) *MockStore_ListTenant return _c } -func (_c *MockStore_ListTenants_Call) RunAndReturn(run func(*bbolt.Tx, index.PartitionKey, uint32) []string) *MockStore_ListTenants_Call { +func (_c *MockStore_ListTenants_Call) RunAndReturn(run func(*bbolt.Tx, store.PartitionKey, uint32) []string) *MockStore_ListTenants_Call { + _c.Call.Return(run) + return _c +} + +// StoreBlock provides a mock function with given fields: _a0, _a1, _a2 +func (_m *MockStore) StoreBlock(_a0 *bbolt.Tx, _a1 store.PartitionKey, _a2 *metastorev1.BlockMeta) error { + ret := _m.Called(_a0, _a1, _a2) + + if len(ret) == 0 { + panic("no return value specified for StoreBlock") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*bbolt.Tx, store.PartitionKey, *metastorev1.BlockMeta) error); ok { + r0 = rf(_a0, _a1, _a2) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockStore_StoreBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StoreBlock' +type MockStore_StoreBlock_Call struct { + *mock.Call +} + +// StoreBlock is a helper method to define mock.On call +// - _a0 *bbolt.Tx +// - _a1 store.PartitionKey +// - _a2 *metastorev1.BlockMeta +func (_e *MockStore_Expecter) StoreBlock(_a0 interface{}, _a1 interface{}, _a2 interface{}) *MockStore_StoreBlock_Call { + return &MockStore_StoreBlock_Call{Call: _e.mock.On("StoreBlock", _a0, _a1, _a2)} +} + +func (_c *MockStore_StoreBlock_Call) Run(run func(_a0 *bbolt.Tx, _a1 store.PartitionKey, _a2 *metastorev1.BlockMeta)) *MockStore_StoreBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*bbolt.Tx), args[1].(store.PartitionKey), args[2].(*metastorev1.BlockMeta)) + }) + return _c +} + +func (_c *MockStore_StoreBlock_Call) Return(_a0 error) *MockStore_StoreBlock_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockStore_StoreBlock_Call) RunAndReturn(run func(*bbolt.Tx, store.PartitionKey, *metastorev1.BlockMeta) error) *MockStore_StoreBlock_Call { _c.Call.Return(run) return _c } diff --git 
a/pkg/test/mocks/mockmetastorev1/mock_index_service_client.go b/pkg/test/mocks/mockmetastorev1/mock_index_service_client.go index dcb258e879..6c693b98fc 100644 --- a/pkg/test/mocks/mockmetastorev1/mock_index_service_client.go +++ b/pkg/test/mocks/mockmetastorev1/mock_index_service_client.go @@ -99,6 +99,80 @@ func (_c *MockIndexServiceClient_AddBlock_Call) RunAndReturn(run func(context.Co return _c } +// GetBlockMetadata provides a mock function with given fields: ctx, in, opts +func (_m *MockIndexServiceClient) GetBlockMetadata(ctx context.Context, in *metastorev1.GetBlockMetadataRequest, opts ...grpc.CallOption) (*metastorev1.GetBlockMetadataResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for GetBlockMetadata") + } + + var r0 *metastorev1.GetBlockMetadataResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *metastorev1.GetBlockMetadataRequest, ...grpc.CallOption) (*metastorev1.GetBlockMetadataResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *metastorev1.GetBlockMetadataRequest, ...grpc.CallOption) *metastorev1.GetBlockMetadataResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*metastorev1.GetBlockMetadataResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *metastorev1.GetBlockMetadataRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockIndexServiceClient_GetBlockMetadata_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockMetadata' +type MockIndexServiceClient_GetBlockMetadata_Call struct { + *mock.Call +} + +// GetBlockMetadata is a helper method to define mock.On call +// - ctx context.Context +// - in *metastorev1.GetBlockMetadataRequest +// - opts ...grpc.CallOption +func (_e *MockIndexServiceClient_Expecter) GetBlockMetadata(ctx interface{}, in interface{}, opts ...interface{}) *MockIndexServiceClient_GetBlockMetadata_Call { + return &MockIndexServiceClient_GetBlockMetadata_Call{Call: _e.mock.On("GetBlockMetadata", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *MockIndexServiceClient_GetBlockMetadata_Call) Run(run func(ctx context.Context, in *metastorev1.GetBlockMetadataRequest, opts ...grpc.CallOption)) *MockIndexServiceClient_GetBlockMetadata_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*metastorev1.GetBlockMetadataRequest), variadicArgs...) + }) + return _c +} + +func (_c *MockIndexServiceClient_GetBlockMetadata_Call) Return(_a0 *metastorev1.GetBlockMetadataResponse, _a1 error) *MockIndexServiceClient_GetBlockMetadata_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockIndexServiceClient_GetBlockMetadata_Call) RunAndReturn(run func(context.Context, *metastorev1.GetBlockMetadataRequest, ...grpc.CallOption) (*metastorev1.GetBlockMetadataResponse, error)) *MockIndexServiceClient_GetBlockMetadata_Call { + _c.Call.Return(run) + return _c +} + // NewMockIndexServiceClient creates a new instance of MockIndexServiceClient. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewMockIndexServiceClient(t interface { diff --git a/pkg/test/mocks/mockmetastorev1/mock_index_service_server.go b/pkg/test/mocks/mockmetastorev1/mock_index_service_server.go index e0e54833e2..f872a4d294 100644 --- a/pkg/test/mocks/mockmetastorev1/mock_index_service_server.go +++ b/pkg/test/mocks/mockmetastorev1/mock_index_service_server.go @@ -81,6 +81,65 @@ func (_c *MockIndexServiceServer_AddBlock_Call) RunAndReturn(run func(context.Co return _c } +// GetBlockMetadata provides a mock function with given fields: _a0, _a1 +func (_m *MockIndexServiceServer) GetBlockMetadata(_a0 context.Context, _a1 *metastorev1.GetBlockMetadataRequest) (*metastorev1.GetBlockMetadataResponse, error) { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for GetBlockMetadata") + } + + var r0 *metastorev1.GetBlockMetadataResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *metastorev1.GetBlockMetadataRequest) (*metastorev1.GetBlockMetadataResponse, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, *metastorev1.GetBlockMetadataRequest) *metastorev1.GetBlockMetadataResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*metastorev1.GetBlockMetadataResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *metastorev1.GetBlockMetadataRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockIndexServiceServer_GetBlockMetadata_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockMetadata' +type MockIndexServiceServer_GetBlockMetadata_Call struct { + *mock.Call +} + +// GetBlockMetadata is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 *metastorev1.GetBlockMetadataRequest +func (_e *MockIndexServiceServer_Expecter) GetBlockMetadata(_a0 interface{}, _a1 interface{}) *MockIndexServiceServer_GetBlockMetadata_Call { + return &MockIndexServiceServer_GetBlockMetadata_Call{Call: _e.mock.On("GetBlockMetadata", _a0, _a1)} +} + +func (_c *MockIndexServiceServer_GetBlockMetadata_Call) Run(run func(_a0 context.Context, _a1 *metastorev1.GetBlockMetadataRequest)) *MockIndexServiceServer_GetBlockMetadata_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*metastorev1.GetBlockMetadataRequest)) + }) + return _c +} + +func (_c *MockIndexServiceServer_GetBlockMetadata_Call) Return(_a0 *metastorev1.GetBlockMetadataResponse, _a1 error) *MockIndexServiceServer_GetBlockMetadata_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockIndexServiceServer_GetBlockMetadata_Call) RunAndReturn(run func(context.Context, *metastorev1.GetBlockMetadataRequest) (*metastorev1.GetBlockMetadataResponse, error)) *MockIndexServiceServer_GetBlockMetadata_Call { + _c.Call.Return(run) + return _c +} + // mustEmbedUnimplementedIndexServiceServer provides a mock function with given fields: func (_m *MockIndexServiceServer) mustEmbedUnimplementedIndexServiceServer() { _m.Called() diff --git a/pkg/test/mocks/mockscheduler/mock_job_store.go b/pkg/test/mocks/mockscheduler/mock_job_store.go new file mode 100644 index 0000000000..bcf762d27b --- /dev/null +++ b/pkg/test/mocks/mockscheduler/mock_job_store.go @@ -0,0 +1,379 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mockscheduler + +import ( + iter "github.com/grafana/pyroscope/pkg/iter" + mock "github.com/stretchr/testify/mock" + bbolt "go.etcd.io/bbolt" + + raft_log "github.com/grafana/pyroscope/api/gen/proto/go/metastore/v1/raft_log" +) + +// MockJobStore is an autogenerated mock type for the JobStore type +type MockJobStore struct { + mock.Mock +} + +type MockJobStore_Expecter struct { + mock *mock.Mock +} + +func (_m *MockJobStore) EXPECT() *MockJobStore_Expecter { + return &MockJobStore_Expecter{mock: &_m.Mock} +} + +// CreateBuckets provides a mock function with given fields: _a0 +func (_m *MockJobStore) CreateBuckets(_a0 *bbolt.Tx) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for CreateBuckets") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*bbolt.Tx) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockJobStore_CreateBuckets_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateBuckets' +type MockJobStore_CreateBuckets_Call struct { + *mock.Call +} + +// CreateBuckets is a helper method to define mock.On call +// - _a0 *bbolt.Tx +func (_e *MockJobStore_Expecter) CreateBuckets(_a0 interface{}) *MockJobStore_CreateBuckets_Call { + return &MockJobStore_CreateBuckets_Call{Call: _e.mock.On("CreateBuckets", _a0)} +} + +func (_c *MockJobStore_CreateBuckets_Call) Run(run func(_a0 *bbolt.Tx)) *MockJobStore_CreateBuckets_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*bbolt.Tx)) + }) + return _c +} + +func (_c *MockJobStore_CreateBuckets_Call) Return(_a0 error) *MockJobStore_CreateBuckets_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockJobStore_CreateBuckets_Call) RunAndReturn(run func(*bbolt.Tx) error) *MockJobStore_CreateBuckets_Call { + _c.Call.Return(run) + return _c +} + +// DeleteJobPlan provides a mock function with given fields: tx, name +func (_m *MockJobStore) DeleteJobPlan(tx *bbolt.Tx, name string) error { + ret := _m.Called(tx, name) + + if len(ret) == 0 { + panic("no return value specified for DeleteJobPlan") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*bbolt.Tx, string) error); ok { + r0 = rf(tx, name) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockJobStore_DeleteJobPlan_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteJobPlan' +type MockJobStore_DeleteJobPlan_Call struct { + *mock.Call +} + +// DeleteJobPlan is a helper method to define mock.On call +// - tx *bbolt.Tx +// - name string +func (_e *MockJobStore_Expecter) DeleteJobPlan(tx interface{}, name interface{}) *MockJobStore_DeleteJobPlan_Call { + return &MockJobStore_DeleteJobPlan_Call{Call: _e.mock.On("DeleteJobPlan", tx, name)} +} + +func (_c *MockJobStore_DeleteJobPlan_Call) Run(run func(tx *bbolt.Tx, name string)) *MockJobStore_DeleteJobPlan_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*bbolt.Tx), args[1].(string)) + }) + return _c +} + +func (_c *MockJobStore_DeleteJobPlan_Call) Return(_a0 error) *MockJobStore_DeleteJobPlan_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockJobStore_DeleteJobPlan_Call) RunAndReturn(run func(*bbolt.Tx, string) error) *MockJobStore_DeleteJobPlan_Call { + _c.Call.Return(run) + return _c +} + +// DeleteJobState provides a mock function with given fields: tx, name +func (_m *MockJobStore) DeleteJobState(tx *bbolt.Tx, name string) error { + ret := _m.Called(tx, name) + + if len(ret) == 0 { + panic("no return value specified for 
DeleteJobState") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*bbolt.Tx, string) error); ok { + r0 = rf(tx, name) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockJobStore_DeleteJobState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteJobState' +type MockJobStore_DeleteJobState_Call struct { + *mock.Call +} + +// DeleteJobState is a helper method to define mock.On call +// - tx *bbolt.Tx +// - name string +func (_e *MockJobStore_Expecter) DeleteJobState(tx interface{}, name interface{}) *MockJobStore_DeleteJobState_Call { + return &MockJobStore_DeleteJobState_Call{Call: _e.mock.On("DeleteJobState", tx, name)} +} + +func (_c *MockJobStore_DeleteJobState_Call) Run(run func(tx *bbolt.Tx, name string)) *MockJobStore_DeleteJobState_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*bbolt.Tx), args[1].(string)) + }) + return _c +} + +func (_c *MockJobStore_DeleteJobState_Call) Return(_a0 error) *MockJobStore_DeleteJobState_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockJobStore_DeleteJobState_Call) RunAndReturn(run func(*bbolt.Tx, string) error) *MockJobStore_DeleteJobState_Call { + _c.Call.Return(run) + return _c +} + +// GetJobPlan provides a mock function with given fields: tx, name +func (_m *MockJobStore) GetJobPlan(tx *bbolt.Tx, name string) (*raft_log.CompactionJobPlan, error) { + ret := _m.Called(tx, name) + + if len(ret) == 0 { + panic("no return value specified for GetJobPlan") + } + + var r0 *raft_log.CompactionJobPlan + var r1 error + if rf, ok := ret.Get(0).(func(*bbolt.Tx, string) (*raft_log.CompactionJobPlan, error)); ok { + return rf(tx, name) + } + if rf, ok := ret.Get(0).(func(*bbolt.Tx, string) *raft_log.CompactionJobPlan); ok { + r0 = rf(tx, name) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*raft_log.CompactionJobPlan) + } + } + + if rf, ok := ret.Get(1).(func(*bbolt.Tx, string) error); ok { + r1 = rf(tx, name) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockJobStore_GetJobPlan_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetJobPlan' +type MockJobStore_GetJobPlan_Call struct { + *mock.Call +} + +// GetJobPlan is a helper method to define mock.On call +// - tx *bbolt.Tx +// - name string +func (_e *MockJobStore_Expecter) GetJobPlan(tx interface{}, name interface{}) *MockJobStore_GetJobPlan_Call { + return &MockJobStore_GetJobPlan_Call{Call: _e.mock.On("GetJobPlan", tx, name)} +} + +func (_c *MockJobStore_GetJobPlan_Call) Run(run func(tx *bbolt.Tx, name string)) *MockJobStore_GetJobPlan_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*bbolt.Tx), args[1].(string)) + }) + return _c +} + +func (_c *MockJobStore_GetJobPlan_Call) Return(_a0 *raft_log.CompactionJobPlan, _a1 error) *MockJobStore_GetJobPlan_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockJobStore_GetJobPlan_Call) RunAndReturn(run func(*bbolt.Tx, string) (*raft_log.CompactionJobPlan, error)) *MockJobStore_GetJobPlan_Call { + _c.Call.Return(run) + return _c +} + +// ListEntries provides a mock function with given fields: _a0 +func (_m *MockJobStore) ListEntries(_a0 *bbolt.Tx) iter.Iterator[*raft_log.CompactionJobState] { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for ListEntries") + } + + var r0 iter.Iterator[*raft_log.CompactionJobState] + if rf, ok := ret.Get(0).(func(*bbolt.Tx) iter.Iterator[*raft_log.CompactionJobState]); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != 
nil { + r0 = ret.Get(0).(iter.Iterator[*raft_log.CompactionJobState]) + } + } + + return r0 +} + +// MockJobStore_ListEntries_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListEntries' +type MockJobStore_ListEntries_Call struct { + *mock.Call +} + +// ListEntries is a helper method to define mock.On call +// - _a0 *bbolt.Tx +func (_e *MockJobStore_Expecter) ListEntries(_a0 interface{}) *MockJobStore_ListEntries_Call { + return &MockJobStore_ListEntries_Call{Call: _e.mock.On("ListEntries", _a0)} +} + +func (_c *MockJobStore_ListEntries_Call) Run(run func(_a0 *bbolt.Tx)) *MockJobStore_ListEntries_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*bbolt.Tx)) + }) + return _c +} + +func (_c *MockJobStore_ListEntries_Call) Return(_a0 iter.Iterator[*raft_log.CompactionJobState]) *MockJobStore_ListEntries_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockJobStore_ListEntries_Call) RunAndReturn(run func(*bbolt.Tx) iter.Iterator[*raft_log.CompactionJobState]) *MockJobStore_ListEntries_Call { + _c.Call.Return(run) + return _c +} + +// StoreJobPlan provides a mock function with given fields: _a0, _a1 +func (_m *MockJobStore) StoreJobPlan(_a0 *bbolt.Tx, _a1 *raft_log.CompactionJobPlan) error { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for StoreJobPlan") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*bbolt.Tx, *raft_log.CompactionJobPlan) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockJobStore_StoreJobPlan_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StoreJobPlan' +type MockJobStore_StoreJobPlan_Call struct { + *mock.Call +} + +// StoreJobPlan is a helper method to define mock.On call +// - _a0 *bbolt.Tx +// - _a1 *raft_log.CompactionJobPlan +func (_e *MockJobStore_Expecter) StoreJobPlan(_a0 interface{}, _a1 interface{}) *MockJobStore_StoreJobPlan_Call { + return &MockJobStore_StoreJobPlan_Call{Call: _e.mock.On("StoreJobPlan", _a0, _a1)} +} + +func (_c *MockJobStore_StoreJobPlan_Call) Run(run func(_a0 *bbolt.Tx, _a1 *raft_log.CompactionJobPlan)) *MockJobStore_StoreJobPlan_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*bbolt.Tx), args[1].(*raft_log.CompactionJobPlan)) + }) + return _c +} + +func (_c *MockJobStore_StoreJobPlan_Call) Return(_a0 error) *MockJobStore_StoreJobPlan_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockJobStore_StoreJobPlan_Call) RunAndReturn(run func(*bbolt.Tx, *raft_log.CompactionJobPlan) error) *MockJobStore_StoreJobPlan_Call { + _c.Call.Return(run) + return _c +} + +// StoreJobState provides a mock function with given fields: _a0, _a1 +func (_m *MockJobStore) StoreJobState(_a0 *bbolt.Tx, _a1 *raft_log.CompactionJobState) error { + ret := _m.Called(_a0, _a1) + + if len(ret) == 0 { + panic("no return value specified for StoreJobState") + } + + var r0 error + if rf, ok := ret.Get(0).(func(*bbolt.Tx, *raft_log.CompactionJobState) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockJobStore_StoreJobState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'StoreJobState' +type MockJobStore_StoreJobState_Call struct { + *mock.Call +} + +// StoreJobState is a helper method to define mock.On call +// - _a0 *bbolt.Tx +// - _a1 *raft_log.CompactionJobState +func (_e *MockJobStore_Expecter) StoreJobState(_a0 interface{}, _a1 interface{}) 
*MockJobStore_StoreJobState_Call { + return &MockJobStore_StoreJobState_Call{Call: _e.mock.On("StoreJobState", _a0, _a1)} +} + +func (_c *MockJobStore_StoreJobState_Call) Run(run func(_a0 *bbolt.Tx, _a1 *raft_log.CompactionJobState)) *MockJobStore_StoreJobState_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(*bbolt.Tx), args[1].(*raft_log.CompactionJobState)) + }) + return _c +} + +func (_c *MockJobStore_StoreJobState_Call) Return(_a0 error) *MockJobStore_StoreJobState_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockJobStore_StoreJobState_Call) RunAndReturn(run func(*bbolt.Tx, *raft_log.CompactionJobState) error) *MockJobStore_StoreJobState_Call { + _c.Call.Return(run) + return _c +} + +// NewMockJobStore creates a new instance of MockJobStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockJobStore(t interface { + mock.TestingT + Cleanup(func()) +}) *MockJobStore { + mock := &MockJobStore{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/test/time.go b/pkg/test/time.go new file mode 100644 index 0000000000..89bb8e66f6 --- /dev/null +++ b/pkg/test/time.go @@ -0,0 +1,25 @@ +package test + +import ( + "crypto/rand" + "time" + + "github.com/oklog/ulid" + "github.com/prometheus/common/model" +) + +func ULID(t string) string { + parsed, _ := time.Parse(time.RFC3339, t) + l := ulid.MustNew(ulid.Timestamp(parsed), rand.Reader) + return l.String() +} + +func Time(t string) int64 { + ts, _ := time.Parse(time.RFC3339, t) + return ts.UnixMilli() +} + +func Duration(d string) time.Duration { + parsed, _ := model.ParseDuration(d) + return time.Duration(parsed) +} diff --git a/pkg/util/concurrency.go b/pkg/util/concurrency.go new file mode 100644 index 0000000000..a7edff0453 --- /dev/null +++ b/pkg/util/concurrency.go @@ -0,0 +1,48 @@ +package util + +import ( + "runtime" + "strconv" + + _ "go.uber.org/automaxprocs" +) + +type ConcurrencyLimit int + +func GoMaxProcsConcurrencyLimit() *ConcurrencyLimit { + lim := ConcurrencyLimit(runtime.GOMAXPROCS(-1)) + return &lim +} + +func (c *ConcurrencyLimit) String() string { + if *c == 0 { + return "auto" + } + return strconv.Itoa(int(*c)) +} + +func (c *ConcurrencyLimit) Set(v string) (err error) { + var p int + if v == "" || v == "auto" { + p = runtime.GOMAXPROCS(-1) + } else { + p, err = strconv.Atoi(v) + } + if err != nil { + return err + } + if p < 1 { + *c = 1 + return + } + *c = ConcurrencyLimit(p) + return nil +} + +func (c *ConcurrencyLimit) UnmarshalText(text []byte) error { + return c.Set(string(text)) +} + +func (c *ConcurrencyLimit) MarshalText() ([]byte, error) { + return []byte(c.String()), nil +}
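
An illustrative sketch, not part of the patch: the new ConcurrencyLimit type in pkg/util/concurrency.go implements flag.Value through its pointer-receiver Set/String methods, so it can back a CLI option that accepts either "auto" (resolved to GOMAXPROCS) or a positive integer, clamping values below 1. The flag name and flag set below are hypothetical and only show how the type could be wired up.

package main

import (
	"flag"
	"fmt"

	"github.com/grafana/pyroscope/pkg/util"
)

func main() {
	// Default to GOMAXPROCS, as GoMaxProcsConcurrencyLimit does.
	lim := util.GoMaxProcsConcurrencyLimit()

	// Hypothetical flag name; the patch itself does not register this flag.
	fs := flag.NewFlagSet("compaction-worker", flag.ContinueOnError)
	fs.Var(lim, "compaction-worker.job-concurrency", `maximum concurrent jobs ("auto" = GOMAXPROCS)`)

	if err := fs.Parse([]string{"-compaction-worker.job-concurrency=4"}); err != nil {
		panic(err)
	}
	fmt.Println(int(*lim)) // 4; "auto" or "" would resolve to GOMAXPROCS, values < 1 clamp to 1.
}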