From e739b3d0ba8acbb1589a3b5873fc158ecda70857 Mon Sep 17 00:00:00 2001 From: SungJin1212 Date: Mon, 11 Nov 2024 10:32:44 +0900 Subject: [PATCH] Integrate cortexpbv2 to cortexpb Signed-off-by: SungJin1212 --- Makefile | 1 - pkg/api/api.go | 3 +- pkg/cortexpb/codecv2.go | 34 + pkg/cortexpb/compat.go | 4 +- pkg/cortexpb/compat_test.go | 4 +- pkg/{cortexpbv2 => cortexpb}/compatv2.go | 12 +- pkg/{cortexpbv2 => cortexpb}/compatv2_test.go | 12 +- pkg/cortexpb/cortex.pb.go | 2359 ++++++++- pkg/cortexpb/cortex.proto | 117 +- pkg/{cortexpbv2 => cortexpb}/timeseriesv2.go | 63 +- .../timeseriesv2_test.go | 20 +- pkg/cortexpbv2/codecv2.go | 160 - pkg/cortexpbv2/cortexv2.pb.go | 4340 ----------------- pkg/cortexpbv2/cortexv2.proto | 227 - pkg/distributor/distributor.go | 112 +- pkg/distributor/distributor_prw2_test.go | 259 +- pkg/distributor/distributor_test.go | 15 +- .../distributorpb/distributor.pb.go | 38 +- .../distributorpb/distributor.proto | 3 +- pkg/ingester/client/client.go | 15 +- pkg/ingester/client/ingester.pb.go | 189 +- pkg/ingester/client/ingester.proto | 3 +- pkg/ingester/ingester.go | 33 +- pkg/ingester/ingester_prw2_test.go | 576 ++- pkg/ruler/compat.go | 3 +- pkg/util/push/otlp_test.go | 2 +- pkg/util/push/push.go | 7 +- pkg/util/push/push_test.go | 39 +- pkg/util/validation/validate.go | 66 +- 29 files changed, 3014 insertions(+), 5702 deletions(-) create mode 100644 pkg/cortexpb/codecv2.go rename pkg/{cortexpbv2 => cortexpb}/compatv2.go (85%) rename pkg/{cortexpbv2 => cortexpb}/compatv2_test.go (81%) rename pkg/{cortexpbv2 => cortexpb}/timeseriesv2.go (66%) rename pkg/{cortexpbv2 => cortexpb}/timeseriesv2_test.go (82%) delete mode 100644 pkg/cortexpbv2/codecv2.go delete mode 100644 pkg/cortexpbv2/cortexv2.pb.go delete mode 100644 pkg/cortexpbv2/cortexv2.proto diff --git a/Makefile b/Makefile index 5936409ebb..2704ee35b6 100644 --- a/Makefile +++ b/Makefile @@ -85,7 +85,6 @@ $(foreach exe, $(EXES), $(eval $(call dep_exe, $(exe)))) # Manually declared dependencies And what goes into each exe pkg/cortexpb/cortex.pb.go: pkg/cortexpb/cortex.proto -pkg/cortexpbv2/cortexv2.pb.go: pkg/cortexpbv2/cortexv2.proto pkg/ingester/client/ingester.pb.go: pkg/ingester/client/ingester.proto pkg/distributor/distributorpb/distributor.pb.go: pkg/distributor/distributorpb/distributor.proto pkg/ingester/wal.pb.go: pkg/ingester/wal.proto diff --git a/pkg/api/api.go b/pkg/api/api.go index 5afd9504ef..7145d5b8e2 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -23,7 +23,6 @@ import ( "github.com/cortexproject/cortex/pkg/alertmanager/alertmanagerpb" "github.com/cortexproject/cortex/pkg/compactor" "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/cortexpbv2" "github.com/cortexproject/cortex/pkg/distributor" "github.com/cortexproject/cortex/pkg/distributor/distributorpb" frontendv1 "github.com/cortexproject/cortex/pkg/frontend/v1" @@ -318,7 +317,7 @@ type Ingester interface { AllUserStatsHandler(http.ResponseWriter, *http.Request) ModeHandler(http.ResponseWriter, *http.Request) Push(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) - PushV2(context.Context, *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) + PushV2(context.Context, *cortexpb.WriteRequestV2) (*cortexpb.WriteResponseV2, error) } // RegisterIngester registers the ingesters HTTP and GRPC service diff --git a/pkg/cortexpb/codecv2.go b/pkg/cortexpb/codecv2.go new file mode 100644 index 0000000000..a80b3b314c --- /dev/null +++ b/pkg/cortexpb/codecv2.go @@ -0,0 +1,34 @@ +package 
cortexpb + +import ( + "github.com/prometheus/prometheus/model/labels" +) + +// ToLabels return model labels.Labels from timeseries' remote labels. +func (t TimeSeriesV2) ToLabels(b *labels.ScratchBuilder, symbols []string) labels.Labels { + return desymbolizeLabels(b, t.GetLabelsRefs(), symbols) +} + +// ToLabels return model labels.Labels from exemplar remote labels. +func (e ExemplarV2) ToLabels(b *labels.ScratchBuilder, symbols []string) labels.Labels { + return desymbolizeLabels(b, e.GetLabelsRefs(), symbols) +} + +func (m MetadataV2) ToV1Metadata(name string, symbols []string) *MetricMetadata { + return &MetricMetadata{ + Type: m.Type, + MetricFamilyName: name, + Unit: symbols[m.UnitRef], + Help: symbols[m.HelpRef], + } +} + +// desymbolizeLabels decodes label references, with given symbols to labels. +func desymbolizeLabels(b *labels.ScratchBuilder, labelRefs []uint32, symbols []string) labels.Labels { + b.Reset() + for i := 0; i < len(labelRefs); i += 2 { + b.Add(symbols[labelRefs[i]], symbols[labelRefs[i+1]]) + } + b.Sort() + return b.Labels() +} diff --git a/pkg/cortexpb/compat.go b/pkg/cortexpb/compat.go index 6de2423d56..4d8cb0c1ee 100644 --- a/pkg/cortexpb/compat.go +++ b/pkg/cortexpb/compat.go @@ -20,7 +20,7 @@ import ( // ToWriteRequest converts matched slices of Labels, Samples, Metadata and Histograms into a WriteRequest proto. // It gets timeseries from the pool, so ReuseSlice() should be called when done. -func ToWriteRequest(lbls []labels.Labels, samples []Sample, metadata []*MetricMetadata, histograms []Histogram, source WriteRequest_SourceEnum) *WriteRequest { +func ToWriteRequest(lbls []labels.Labels, samples []Sample, metadata []*MetricMetadata, histograms []Histogram, source SourceEnum) *WriteRequest { req := &WriteRequest{ Timeseries: PreallocTimeseriesSliceFromPool(), Metadata: metadata, @@ -170,7 +170,7 @@ func (s byLabel) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // MetricMetadataMetricTypeToMetricType converts a metric type from our internal client // to a Prometheus one. -func MetricMetadataMetricTypeToMetricType(mt MetricMetadata_MetricType) model.MetricType { +func MetricMetadataMetricTypeToMetricType(mt MetricType) model.MetricType { switch mt { case UNKNOWN: return model.MetricTypeUnknown diff --git a/pkg/cortexpb/compat_test.go b/pkg/cortexpb/compat_test.go index 6fda91a84e..1f8d11ad1c 100644 --- a/pkg/cortexpb/compat_test.go +++ b/pkg/cortexpb/compat_test.go @@ -74,7 +74,7 @@ func testUnmarshalling(t *testing.T, unmarshalFn func(data []byte, v interface{} func TestMetricMetadataToMetricTypeToMetricType(t *testing.T) { tc := []struct { desc string - input MetricMetadata_MetricType + input MetricType expected model.MetricType }{ { @@ -89,7 +89,7 @@ func TestMetricMetadataToMetricTypeToMetricType(t *testing.T) { }, { desc: "with an unknown metric", - input: MetricMetadata_MetricType(100), + input: MetricType(100), expected: model.MetricTypeUnknown, }, } diff --git a/pkg/cortexpbv2/compatv2.go b/pkg/cortexpb/compatv2.go similarity index 85% rename from pkg/cortexpbv2/compatv2.go rename to pkg/cortexpb/compatv2.go index 13d42d000f..ed6e441624 100644 --- a/pkg/cortexpbv2/compatv2.go +++ b/pkg/cortexpb/compatv2.go @@ -1,14 +1,12 @@ -package cortexpbv2 +package cortexpb import ( "github.com/prometheus/prometheus/model/labels" writev2 "github.com/prometheus/prometheus/prompb/io/prometheus/write/v2" - - "github.com/cortexproject/cortex/pkg/cortexpb" ) // ToWriteRequestV2 converts matched slices of Labels, Samples, and Histograms into a WriteRequest proto. 
-func ToWriteRequestV2(lbls []labels.Labels, samples []Sample, histograms []Histogram, metadata []Metadata, source WriteRequest_SourceEnum, help ...string) *WriteRequest { +func ToWriteRequestV2(lbls []labels.Labels, samples []Sample, histograms []Histogram, metadata []MetadataV2, source SourceEnum, help ...string) *WriteRequestV2 { st := writev2.NewSymbolTable() labelRefs := make([][]uint32, 0, len(lbls)) for _, lbl := range lbls { @@ -21,7 +19,7 @@ func ToWriteRequestV2(lbls []labels.Labels, samples []Sample, histograms []Histo symbols := st.Symbols() - req := &WriteRequest{ + req := &WriteRequestV2{ Timeseries: PreallocTimeseriesV2SliceFromPool(), Symbols: symbols, Source: source, @@ -41,13 +39,13 @@ func ToWriteRequestV2(lbls []labels.Labels, samples []Sample, histograms []Histo ts.Metadata = metadata[i] } i++ - req.Timeseries = append(req.Timeseries, PreallocTimeseriesV2{TimeSeries: ts}) + req.Timeseries = append(req.Timeseries, PreallocTimeseriesV2{TimeSeriesV2: ts}) } return req } -func GetLabelRefsFromLabelAdapters(symbols []string, las []cortexpb.LabelAdapter) []uint32 { +func GetLabelRefsFromLabelAdapters(symbols []string, las []LabelAdapter) []uint32 { var ret []uint32 symbolMap := map[string]uint32{} diff --git a/pkg/cortexpbv2/compatv2_test.go b/pkg/cortexpb/compatv2_test.go similarity index 81% rename from pkg/cortexpbv2/compatv2_test.go rename to pkg/cortexpb/compatv2_test.go index a6d809e0a4..b25c5c2064 100644 --- a/pkg/cortexpbv2/compatv2_test.go +++ b/pkg/cortexpb/compatv2_test.go @@ -1,33 +1,31 @@ -package cortexpbv2 +package cortexpb import ( "testing" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" - - "github.com/cortexproject/cortex/pkg/cortexpb" ) func Test_GetLabelRefsFromLabelAdapters(t *testing.T) { tests := []struct { symbols []string - lbs []cortexpb.LabelAdapter + lbs []LabelAdapter expectedSeriesRefs []uint32 }{ { symbols: []string{"", "__name__", "test_metric", "foo", "bar", "baz", "qux"}, - lbs: []cortexpb.LabelAdapter{{Name: "__name__", Value: "test_metric"}, {Name: "foo", Value: "bar"}}, + lbs: []LabelAdapter{{Name: "__name__", Value: "test_metric"}, {Name: "foo", Value: "bar"}}, expectedSeriesRefs: []uint32{1, 2, 3, 4}, }, { symbols: []string{"", "__name__", "test_metric", "foo", "bar", "baz", "qux"}, - lbs: []cortexpb.LabelAdapter{{Name: "__name__", Value: "test_metric"}, {Name: "baz", Value: "qux"}}, + lbs: []LabelAdapter{{Name: "__name__", Value: "test_metric"}, {Name: "baz", Value: "qux"}}, expectedSeriesRefs: []uint32{1, 2, 5, 6}, }, { symbols: []string{"", "__name__", "test_metric", "foo", "bar", "baz", "qux", "1"}, - lbs: []cortexpb.LabelAdapter{{Name: "__name__", Value: "test_metric"}, {Name: "baz", Value: "qux"}, {Name: "qux", Value: "1"}}, + lbs: []LabelAdapter{{Name: "__name__", Value: "test_metric"}, {Name: "baz", Value: "qux"}, {Name: "qux", Value: "1"}}, expectedSeriesRefs: []uint32{1, 2, 5, 6, 6, 7}, }, } diff --git a/pkg/cortexpb/cortex.pb.go b/pkg/cortexpb/cortex.pb.go index 3b63e15904..104a97c807 100644 --- a/pkg/cortexpb/cortex.pb.go +++ b/pkg/cortexpb/cortex.pb.go @@ -28,41 +28,41 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -type WriteRequest_SourceEnum int32 +type SourceEnum int32 const ( - API WriteRequest_SourceEnum = 0 - RULE WriteRequest_SourceEnum = 1 + API SourceEnum = 0 + RULE SourceEnum = 1 ) -var WriteRequest_SourceEnum_name = map[int32]string{ +var SourceEnum_name = map[int32]string{ 0: "API", 1: "RULE", } -var WriteRequest_SourceEnum_value = map[string]int32{ +var SourceEnum_value = map[string]int32{ "API": 0, "RULE": 1, } -func (WriteRequest_SourceEnum) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{0, 0} +func (SourceEnum) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{0} } -type MetricMetadata_MetricType int32 +type MetricType int32 const ( - UNKNOWN MetricMetadata_MetricType = 0 - COUNTER MetricMetadata_MetricType = 1 - GAUGE MetricMetadata_MetricType = 2 - HISTOGRAM MetricMetadata_MetricType = 3 - GAUGEHISTOGRAM MetricMetadata_MetricType = 4 - SUMMARY MetricMetadata_MetricType = 5 - INFO MetricMetadata_MetricType = 6 - STATESET MetricMetadata_MetricType = 7 + UNKNOWN MetricType = 0 + COUNTER MetricType = 1 + GAUGE MetricType = 2 + HISTOGRAM MetricType = 3 + GAUGEHISTOGRAM MetricType = 4 + SUMMARY MetricType = 5 + INFO MetricType = 6 + STATESET MetricType = 7 ) -var MetricMetadata_MetricType_name = map[int32]string{ +var MetricType_name = map[int32]string{ 0: "UNKNOWN", 1: "COUNTER", 2: "GAUGE", @@ -73,7 +73,7 @@ var MetricMetadata_MetricType_name = map[int32]string{ 7: "STATESET", } -var MetricMetadata_MetricType_value = map[string]int32{ +var MetricType_value = map[string]int32{ "UNKNOWN": 0, "COUNTER": 1, "GAUGE": 2, @@ -84,8 +84,8 @@ var MetricMetadata_MetricType_value = map[string]int32{ "STATESET": 7, } -func (MetricMetadata_MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{5, 0} +func (MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{1} } type Histogram_ResetHint int32 @@ -112,20 +112,387 @@ var Histogram_ResetHint_value = map[string]int32{ } func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{8, 0} + return fileDescriptor_893a47d0a749d749, []int{13, 0} +} + +// https://github.com/prometheus/prometheus/blob/main/prompb/io/prometheus/write/v2/types.proto +type WriteRequestV2 struct { + Source SourceEnum `protobuf:"varint,3,opt,name=Source,proto3,enum=cortexpb.SourceEnum" json:"Source,omitempty"` + Symbols []string `protobuf:"bytes,4,rep,name=symbols,proto3" json:"symbols,omitempty"` + Timeseries []PreallocTimeseriesV2 `protobuf:"bytes,5,rep,name=timeseries,proto3,customtype=PreallocTimeseriesV2" json:"timeseries"` + SkipLabelNameValidation bool `protobuf:"varint,1000,opt,name=skip_label_name_validation,json=skipLabelNameValidation,proto3" json:"skip_label_name_validation,omitempty"` +} + +func (m *WriteRequestV2) Reset() { *m = WriteRequestV2{} } +func (*WriteRequestV2) ProtoMessage() {} +func (*WriteRequestV2) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{0} +} +func (m *WriteRequestV2) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WriteRequestV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WriteRequestV2.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m 
*WriteRequestV2) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteRequestV2.Merge(m, src) +} +func (m *WriteRequestV2) XXX_Size() int { + return m.Size() +} +func (m *WriteRequestV2) XXX_DiscardUnknown() { + xxx_messageInfo_WriteRequestV2.DiscardUnknown(m) +} + +var xxx_messageInfo_WriteRequestV2 proto.InternalMessageInfo + +func (m *WriteRequestV2) GetSource() SourceEnum { + if m != nil { + return m.Source + } + return API +} + +func (m *WriteRequestV2) GetSymbols() []string { + if m != nil { + return m.Symbols + } + return nil +} + +func (m *WriteRequestV2) GetSkipLabelNameValidation() bool { + if m != nil { + return m.SkipLabelNameValidation + } + return false +} + +type WriteResponseV2 struct { + // Samples represents X-Prometheus-Remote-Write-Written-Samples + Samples int64 `protobuf:"varint,1,opt,name=Samples,proto3" json:"Samples,omitempty"` + // Histograms represents X-Prometheus-Remote-Write-Written-Histograms + Histograms int64 `protobuf:"varint,2,opt,name=Histograms,proto3" json:"Histograms,omitempty"` + // Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars + Exemplars int64 `protobuf:"varint,3,opt,name=Exemplars,proto3" json:"Exemplars,omitempty"` +} + +func (m *WriteResponseV2) Reset() { *m = WriteResponseV2{} } +func (*WriteResponseV2) ProtoMessage() {} +func (*WriteResponseV2) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{1} +} +func (m *WriteResponseV2) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WriteResponseV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WriteResponseV2.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WriteResponseV2) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteResponseV2.Merge(m, src) +} +func (m *WriteResponseV2) XXX_Size() int { + return m.Size() +} +func (m *WriteResponseV2) XXX_DiscardUnknown() { + xxx_messageInfo_WriteResponseV2.DiscardUnknown(m) +} + +var xxx_messageInfo_WriteResponseV2 proto.InternalMessageInfo + +func (m *WriteResponseV2) GetSamples() int64 { + if m != nil { + return m.Samples + } + return 0 +} + +func (m *WriteResponseV2) GetHistograms() int64 { + if m != nil { + return m.Histograms + } + return 0 +} + +func (m *WriteResponseV2) GetExemplars() int64 { + if m != nil { + return m.Exemplars + } + return 0 +} + +type TimeSeriesV2 struct { + LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"` + // Timeseries messages can either specify samples or (native) histogram samples + // (histogram field), but not both. For a typical sender (real-time metric + // streaming), in healthy cases, there will be only one sample or histogram. + // + // Samples and histograms are sorted by timestamp (older first). + Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` + Histograms []Histogram `protobuf:"bytes,3,rep,name=histograms,proto3" json:"histograms"` + // exemplars represents an optional set of exemplars attached to this series' samples. + Exemplars []ExemplarV2 `protobuf:"bytes,4,rep,name=exemplars,proto3" json:"exemplars"` + // metadata represents the metadata associated with the given series' samples. 
+ Metadata MetadataV2 `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata"` + // created_timestamp represents an optional created timestamp associated with + // this series' samples in ms format, typically for counter or histogram type + // metrics. Created timestamp represents the time when the counter started + // counting (sometimes referred to as start timestamp), which can increase + // the accuracy of query results. + // + // Note that some receivers might require this and in return fail to + // ingest such samples within the Request. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + // + // Note that the "optional" keyword is omitted due to + // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields + // Zero value means value not set. If you need to use exactly zero value for + // the timestamp, use 1 millisecond before or after. + CreatedTimestamp int64 `protobuf:"varint,6,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` +} + +func (m *TimeSeriesV2) Reset() { *m = TimeSeriesV2{} } +func (*TimeSeriesV2) ProtoMessage() {} +func (*TimeSeriesV2) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{2} +} +func (m *TimeSeriesV2) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeSeriesV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeSeriesV2.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimeSeriesV2) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeriesV2.Merge(m, src) +} +func (m *TimeSeriesV2) XXX_Size() int { + return m.Size() +} +func (m *TimeSeriesV2) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeriesV2.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeriesV2 proto.InternalMessageInfo + +func (m *TimeSeriesV2) GetLabelsRefs() []uint32 { + if m != nil { + return m.LabelsRefs + } + return nil +} + +func (m *TimeSeriesV2) GetSamples() []Sample { + if m != nil { + return m.Samples + } + return nil +} + +func (m *TimeSeriesV2) GetHistograms() []Histogram { + if m != nil { + return m.Histograms + } + return nil +} + +func (m *TimeSeriesV2) GetExemplars() []ExemplarV2 { + if m != nil { + return m.Exemplars + } + return nil +} + +func (m *TimeSeriesV2) GetMetadata() MetadataV2 { + if m != nil { + return m.Metadata + } + return MetadataV2{} +} + +func (m *TimeSeriesV2) GetCreatedTimestamp() int64 { + if m != nil { + return m.CreatedTimestamp + } + return 0 +} + +// Exemplar is an additional information attached to some series' samples. +// It is typically used to attach an example trace or request ID associated with +// the metric changes. +type ExemplarV2 struct { + // labels_refs is an optional list of label name-value pair references, encoded + // as indices to the Request.symbols array. This list's len is always + // a multiple of 2, and the underlying labels should be sorted lexicographically. + // If the exemplar references a trace it should use the `trace_id` label name, as a best practice. + LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"` + // value represents an exact example value. 
This can be useful when the exemplar + // is attached to a histogram, which only gives an estimated value through buckets. + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + // timestamp represents the timestamp of the exemplar in ms. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (m *ExemplarV2) Reset() { *m = ExemplarV2{} } +func (*ExemplarV2) ProtoMessage() {} +func (*ExemplarV2) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{3} +} +func (m *ExemplarV2) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExemplarV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExemplarV2.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExemplarV2) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExemplarV2.Merge(m, src) +} +func (m *ExemplarV2) XXX_Size() int { + return m.Size() +} +func (m *ExemplarV2) XXX_DiscardUnknown() { + xxx_messageInfo_ExemplarV2.DiscardUnknown(m) +} + +var xxx_messageInfo_ExemplarV2 proto.InternalMessageInfo + +func (m *ExemplarV2) GetLabelsRefs() []uint32 { + if m != nil { + return m.LabelsRefs + } + return nil +} + +func (m *ExemplarV2) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *ExemplarV2) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +// Metadata represents the metadata associated with the given series' samples. +type MetadataV2 struct { + Type MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=cortexpb.MetricType" json:"type,omitempty"` + // help_ref is a reference to the Request.symbols array representing help + // text for the metric. Help is optional, reference should point to an empty string in + // such a case. + HelpRef uint32 `protobuf:"varint,3,opt,name=help_ref,json=helpRef,proto3" json:"help_ref,omitempty"` + // unit_ref is a reference to the Request.symbols array representing a unit + // for the metric. Unit is optional, reference should point to an empty string in + // such a case. 
+ UnitRef uint32 `protobuf:"varint,4,opt,name=unit_ref,json=unitRef,proto3" json:"unit_ref,omitempty"` +} + +func (m *MetadataV2) Reset() { *m = MetadataV2{} } +func (*MetadataV2) ProtoMessage() {} +func (*MetadataV2) Descriptor() ([]byte, []int) { + return fileDescriptor_893a47d0a749d749, []int{4} +} +func (m *MetadataV2) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetadataV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MetadataV2.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MetadataV2) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetadataV2.Merge(m, src) +} +func (m *MetadataV2) XXX_Size() int { + return m.Size() +} +func (m *MetadataV2) XXX_DiscardUnknown() { + xxx_messageInfo_MetadataV2.DiscardUnknown(m) +} + +var xxx_messageInfo_MetadataV2 proto.InternalMessageInfo + +func (m *MetadataV2) GetType() MetricType { + if m != nil { + return m.Type + } + return UNKNOWN +} + +func (m *MetadataV2) GetHelpRef() uint32 { + if m != nil { + return m.HelpRef + } + return 0 +} + +func (m *MetadataV2) GetUnitRef() uint32 { + if m != nil { + return m.UnitRef + } + return 0 } type WriteRequest struct { - Timeseries []PreallocTimeseries `protobuf:"bytes,1,rep,name=timeseries,proto3,customtype=PreallocTimeseries" json:"timeseries"` - Source WriteRequest_SourceEnum `protobuf:"varint,2,opt,name=Source,proto3,enum=cortexpb.WriteRequest_SourceEnum" json:"Source,omitempty"` - Metadata []*MetricMetadata `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty"` - SkipLabelNameValidation bool `protobuf:"varint,1000,opt,name=skip_label_name_validation,json=skipLabelNameValidation,proto3" json:"skip_label_name_validation,omitempty"` + Timeseries []PreallocTimeseries `protobuf:"bytes,1,rep,name=timeseries,proto3,customtype=PreallocTimeseries" json:"timeseries"` + Source SourceEnum `protobuf:"varint,2,opt,name=Source,proto3,enum=cortexpb.SourceEnum" json:"Source,omitempty"` + Metadata []*MetricMetadata `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty"` + SkipLabelNameValidation bool `protobuf:"varint,1000,opt,name=skip_label_name_validation,json=skipLabelNameValidation,proto3" json:"skip_label_name_validation,omitempty"` } func (m *WriteRequest) Reset() { *m = WriteRequest{} } func (*WriteRequest) ProtoMessage() {} func (*WriteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{0} + return fileDescriptor_893a47d0a749d749, []int{5} } func (m *WriteRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -154,7 +521,7 @@ func (m *WriteRequest) XXX_DiscardUnknown() { var xxx_messageInfo_WriteRequest proto.InternalMessageInfo -func (m *WriteRequest) GetSource() WriteRequest_SourceEnum { +func (m *WriteRequest) GetSource() SourceEnum { if m != nil { return m.Source } @@ -181,7 +548,7 @@ type WriteResponse struct { func (m *WriteResponse) Reset() { *m = WriteResponse{} } func (*WriteResponse) ProtoMessage() {} func (*WriteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{1} + return fileDescriptor_893a47d0a749d749, []int{6} } func (m *WriteResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -221,7 +588,7 @@ type TimeSeries struct { func (m *TimeSeries) Reset() { *m = TimeSeries{} } func (*TimeSeries) ProtoMessage() {} func (*TimeSeries) Descriptor() ([]byte, []int) { - return 
fileDescriptor_893a47d0a749d749, []int{2} + return fileDescriptor_893a47d0a749d749, []int{7} } func (m *TimeSeries) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -279,7 +646,7 @@ type LabelPair struct { func (m *LabelPair) Reset() { *m = LabelPair{} } func (*LabelPair) ProtoMessage() {} func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{3} + return fileDescriptor_893a47d0a749d749, []int{8} } func (m *LabelPair) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -330,7 +697,7 @@ type Sample struct { func (m *Sample) Reset() { *m = Sample{} } func (*Sample) ProtoMessage() {} func (*Sample) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{4} + return fileDescriptor_893a47d0a749d749, []int{9} } func (m *Sample) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -374,16 +741,16 @@ func (m *Sample) GetTimestampMs() int64 { } type MetricMetadata struct { - Type MetricMetadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=cortexpb.MetricMetadata_MetricType" json:"type,omitempty"` - MetricFamilyName string `protobuf:"bytes,2,opt,name=metric_family_name,json=metricFamilyName,proto3" json:"metric_family_name,omitempty"` - Help string `protobuf:"bytes,4,opt,name=help,proto3" json:"help,omitempty"` - Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` + Type MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=cortexpb.MetricType" json:"type,omitempty"` + MetricFamilyName string `protobuf:"bytes,2,opt,name=metric_family_name,json=metricFamilyName,proto3" json:"metric_family_name,omitempty"` + Help string `protobuf:"bytes,4,opt,name=help,proto3" json:"help,omitempty"` + Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` } func (m *MetricMetadata) Reset() { *m = MetricMetadata{} } func (*MetricMetadata) ProtoMessage() {} func (*MetricMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{5} + return fileDescriptor_893a47d0a749d749, []int{10} } func (m *MetricMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -412,7 +779,7 @@ func (m *MetricMetadata) XXX_DiscardUnknown() { var xxx_messageInfo_MetricMetadata proto.InternalMessageInfo -func (m *MetricMetadata) GetType() MetricMetadata_MetricType { +func (m *MetricMetadata) GetType() MetricType { if m != nil { return m.Type } @@ -447,7 +814,7 @@ type Metric struct { func (m *Metric) Reset() { *m = Metric{} } func (*Metric) ProtoMessage() {} func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{6} + return fileDescriptor_893a47d0a749d749, []int{11} } func (m *Metric) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -486,7 +853,7 @@ type Exemplar struct { func (m *Exemplar) Reset() { *m = Exemplar{} } func (*Exemplar) ProtoMessage() {} func (*Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{7} + return fileDescriptor_893a47d0a749d749, []int{12} } func (m *Exemplar) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -579,7 +946,7 @@ type Histogram struct { func (m *Histogram) Reset() { *m = Histogram{} } func (*Histogram) ProtoMessage() {} func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{8} + return fileDescriptor_893a47d0a749d749, []int{13} } func (m *Histogram) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -780,7 +1147,7 @@ type BucketSpan struct { func (m *BucketSpan) Reset() { *m = BucketSpan{} } 
func (*BucketSpan) ProtoMessage() {} func (*BucketSpan) Descriptor() ([]byte, []int) { - return fileDescriptor_893a47d0a749d749, []int{9} + return fileDescriptor_893a47d0a749d749, []int{14} } func (m *BucketSpan) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -824,9 +1191,14 @@ func (m *BucketSpan) GetLength() uint32 { } func init() { - proto.RegisterEnum("cortexpb.WriteRequest_SourceEnum", WriteRequest_SourceEnum_name, WriteRequest_SourceEnum_value) - proto.RegisterEnum("cortexpb.MetricMetadata_MetricType", MetricMetadata_MetricType_name, MetricMetadata_MetricType_value) + proto.RegisterEnum("cortexpb.SourceEnum", SourceEnum_name, SourceEnum_value) + proto.RegisterEnum("cortexpb.MetricType", MetricType_name, MetricType_value) proto.RegisterEnum("cortexpb.Histogram_ResetHint", Histogram_ResetHint_name, Histogram_ResetHint_value) + proto.RegisterType((*WriteRequestV2)(nil), "cortexpb.WriteRequestV2") + proto.RegisterType((*WriteResponseV2)(nil), "cortexpb.WriteResponseV2") + proto.RegisterType((*TimeSeriesV2)(nil), "cortexpb.TimeSeriesV2") + proto.RegisterType((*ExemplarV2)(nil), "cortexpb.ExemplarV2") + proto.RegisterType((*MetadataV2)(nil), "cortexpb.MetadataV2") proto.RegisterType((*WriteRequest)(nil), "cortexpb.WriteRequest") proto.RegisterType((*WriteResponse)(nil), "cortexpb.WriteResponse") proto.RegisterType((*TimeSeries)(nil), "cortexpb.TimeSeries") @@ -842,83 +1214,97 @@ func init() { func init() { proto.RegisterFile("cortex.proto", fileDescriptor_893a47d0a749d749) } var fileDescriptor_893a47d0a749d749 = []byte{ - // 1031 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4b, 0x6f, 0x23, 0x45, - 0x17, 0xed, 0x72, 0xfb, 0x79, 0x63, 0x3b, 0x3d, 0xf5, 0x45, 0x1f, 0xad, 0x48, 0xd3, 0x71, 0x1a, - 0x01, 0x16, 0x42, 0x01, 0x05, 0x01, 0x9a, 0x51, 0x84, 0x64, 0x0f, 0xce, 0x43, 0x33, 0x76, 0xa2, - 0xb2, 0xc3, 0x68, 0xd8, 0x58, 0x15, 0xa7, 0x12, 0xb7, 0xa6, 0x5f, 0x74, 0x95, 0xa3, 0x09, 0x2b, - 0x56, 0x88, 0x25, 0x6b, 0xb6, 0x6c, 0xf8, 0x05, 0xfc, 0x86, 0x2c, 0xb3, 0x1c, 0xb1, 0x88, 0x88, - 0xb3, 0x99, 0xe5, 0x2c, 0xf8, 0x01, 0xa8, 0xaa, 0x5f, 0xce, 0x84, 0x11, 0x9b, 0xd9, 0x55, 0x9d, - 0x7b, 0xcf, 0xbd, 0xa7, 0xea, 0x9e, 0x2e, 0x35, 0xd4, 0x27, 0x41, 0x24, 0xd8, 0x8b, 0x8d, 0x30, - 0x0a, 0x44, 0x80, 0xab, 0xf1, 0x2e, 0x3c, 0x5a, 0x5d, 0x39, 0x0d, 0x4e, 0x03, 0x05, 0x7e, 0x2a, - 0x57, 0x71, 0xdc, 0xfe, 0xa3, 0x00, 0xf5, 0xa7, 0x91, 0x23, 0x18, 0x61, 0xdf, 0xcf, 0x18, 0x17, - 0xf8, 0x00, 0x40, 0x38, 0x1e, 0xe3, 0x2c, 0x72, 0x18, 0x37, 0x51, 0x4b, 0x6f, 0x2f, 0x6d, 0xae, - 0x6c, 0xa4, 0x55, 0x36, 0x46, 0x8e, 0xc7, 0x86, 0x2a, 0xd6, 0x5d, 0xbd, 0xb8, 0x5a, 0xd3, 0xfe, - 0xbc, 0x5a, 0xc3, 0x07, 0x11, 0xa3, 0xae, 0x1b, 0x4c, 0x46, 0x19, 0x8f, 0x2c, 0xd4, 0xc0, 0x0f, - 0xa0, 0x3c, 0x0c, 0x66, 0xd1, 0x84, 0x99, 0x85, 0x16, 0x6a, 0x37, 0x37, 0xd7, 0xf3, 0x6a, 0x8b, - 0x9d, 0x37, 0xe2, 0xa4, 0x9e, 0x3f, 0xf3, 0x48, 0x42, 0xc0, 0x0f, 0xa1, 0xea, 0x31, 0x41, 0x8f, - 0xa9, 0xa0, 0xa6, 0xae, 0xa4, 0x98, 0x39, 0xb9, 0xcf, 0x44, 0xe4, 0x4c, 0xfa, 0x49, 0xbc, 0x5b, - 0xbc, 0xb8, 0x5a, 0x43, 0x24, 0xcb, 0xc7, 0x5b, 0xb0, 0xca, 0x9f, 0x3b, 0xe1, 0xd8, 0xa5, 0x47, - 0xcc, 0x1d, 0xfb, 0xd4, 0x63, 0xe3, 0x33, 0xea, 0x3a, 0xc7, 0x54, 0x38, 0x81, 0x6f, 0xbe, 0xaa, - 0xb4, 0x50, 0xbb, 0x4a, 0xde, 0x93, 0x29, 0x4f, 0x64, 0xc6, 0x80, 0x7a, 0xec, 0xdb, 0x2c, 0x6e, - 0xaf, 0x01, 0xe4, 0x7a, 0x70, 0x05, 0xf4, 0xce, 0xc1, 0x9e, 0xa1, 0xe1, 0x2a, 0x14, 0xc9, 0xe1, - 0x93, 0x9e, 0x81, 0xec, 0x65, 0x68, 0x24, 0xea, 0x79, 0x18, 0xf8, 0x9c, 0xd9, 0x7f, 
0x23, 0x80, - 0xfc, 0x76, 0x70, 0x07, 0xca, 0xaa, 0x73, 0x7a, 0x87, 0xff, 0xcb, 0x85, 0xab, 0x7e, 0x07, 0xd4, - 0x89, 0xba, 0x2b, 0xc9, 0x15, 0xd6, 0x15, 0xd4, 0x39, 0xa6, 0xa1, 0x60, 0x11, 0x49, 0x88, 0xf8, - 0x33, 0xa8, 0x70, 0xea, 0x85, 0x2e, 0xe3, 0x66, 0x41, 0xd5, 0x30, 0xf2, 0x1a, 0x43, 0x15, 0x50, - 0x87, 0xd6, 0x48, 0x9a, 0x86, 0xbf, 0x84, 0x1a, 0x7b, 0xc1, 0xbc, 0xd0, 0xa5, 0x11, 0x4f, 0x2e, - 0x0c, 0xe7, 0x9c, 0x5e, 0x12, 0x4a, 0x58, 0x79, 0x2a, 0x7e, 0x00, 0x30, 0x75, 0xb8, 0x08, 0x4e, - 0x23, 0xea, 0x71, 0xb3, 0xf8, 0xa6, 0xe0, 0xdd, 0x34, 0x96, 0x30, 0x17, 0x92, 0xed, 0x2f, 0xa0, - 0x96, 0x9d, 0x07, 0x63, 0x28, 0xca, 0x8b, 0x36, 0x51, 0x0b, 0xb5, 0xeb, 0x44, 0xad, 0xf1, 0x0a, - 0x94, 0xce, 0xa8, 0x3b, 0x8b, 0xa7, 0x5f, 0x27, 0xf1, 0xc6, 0xee, 0x40, 0x39, 0x3e, 0x42, 0x1e, - 0x97, 0x24, 0x94, 0xc4, 0xf1, 0x3a, 0xd4, 0x95, 0x85, 0x04, 0xf5, 0xc2, 0xb1, 0xc7, 0x15, 0x59, - 0x27, 0x4b, 0x19, 0xd6, 0xe7, 0xf6, 0xaf, 0x05, 0x68, 0xde, 0xf6, 0x00, 0xfe, 0x0a, 0x8a, 0xe2, - 0x3c, 0x8c, 0x4b, 0x35, 0x37, 0xdf, 0x7f, 0x9b, 0x57, 0x92, 0xed, 0xe8, 0x3c, 0x64, 0x44, 0x11, - 0xf0, 0x27, 0x80, 0x3d, 0x85, 0x8d, 0x4f, 0xa8, 0xe7, 0xb8, 0xe7, 0xca, 0x2f, 0xaa, 0x69, 0x8d, - 0x18, 0x71, 0x64, 0x5b, 0x05, 0xa4, 0x4d, 0xe4, 0x31, 0xa7, 0xcc, 0x0d, 0xcd, 0xa2, 0x8a, 0xab, - 0xb5, 0xc4, 0x66, 0xbe, 0x23, 0xcc, 0x52, 0x8c, 0xc9, 0xb5, 0x7d, 0x0e, 0x90, 0x77, 0xc2, 0x4b, - 0x50, 0x39, 0x1c, 0x3c, 0x1e, 0xec, 0x3f, 0x1d, 0x18, 0x9a, 0xdc, 0x3c, 0xda, 0x3f, 0x1c, 0x8c, - 0x7a, 0xc4, 0x40, 0xb8, 0x06, 0xa5, 0x9d, 0xce, 0xe1, 0x4e, 0xcf, 0x28, 0xe0, 0x06, 0xd4, 0x76, - 0xf7, 0x86, 0xa3, 0xfd, 0x1d, 0xd2, 0xe9, 0x1b, 0x3a, 0xc6, 0xd0, 0x54, 0x91, 0x1c, 0x2b, 0x4a, - 0xea, 0xf0, 0xb0, 0xdf, 0xef, 0x90, 0x67, 0x46, 0x49, 0x1a, 0x72, 0x6f, 0xb0, 0xbd, 0x6f, 0x94, - 0x71, 0x1d, 0xaa, 0xc3, 0x51, 0x67, 0xd4, 0x1b, 0xf6, 0x46, 0x46, 0xc5, 0x7e, 0x0c, 0xe5, 0xb8, - 0xf5, 0x3b, 0x30, 0xa2, 0xfd, 0x13, 0x82, 0x6a, 0x6a, 0x9e, 0x77, 0x61, 0xec, 0x5b, 0x96, 0x78, - 0xeb, 0xc8, 0xf5, 0xbb, 0x23, 0xbf, 0x2c, 0x41, 0x2d, 0x33, 0x23, 0xbe, 0x0f, 0xb5, 0x49, 0x30, - 0xf3, 0xc5, 0xd8, 0xf1, 0x85, 0x1a, 0x79, 0x71, 0x57, 0x23, 0x55, 0x05, 0xed, 0xf9, 0x02, 0xaf, - 0xc3, 0x52, 0x1c, 0x3e, 0x71, 0x03, 0x2a, 0xe2, 0x5e, 0xbb, 0x1a, 0x01, 0x05, 0x6e, 0x4b, 0x0c, - 0x1b, 0xa0, 0xf3, 0x99, 0xa7, 0x3a, 0x21, 0x22, 0x97, 0xf8, 0xff, 0x50, 0xe6, 0x93, 0x29, 0xf3, - 0xa8, 0x1a, 0xee, 0x3d, 0x92, 0xec, 0xf0, 0x07, 0xd0, 0xfc, 0x81, 0x45, 0xc1, 0x58, 0x4c, 0x23, - 0xc6, 0xa7, 0x81, 0x7b, 0xac, 0x06, 0x8d, 0x48, 0x43, 0xa2, 0xa3, 0x14, 0xc4, 0x1f, 0x26, 0x69, - 0xb9, 0xae, 0xb2, 0xd2, 0x85, 0x48, 0x5d, 0xe2, 0x8f, 0x52, 0x6d, 0x1f, 0x83, 0xb1, 0x90, 0x17, - 0x0b, 0xac, 0x28, 0x81, 0x88, 0x34, 0xb3, 0xcc, 0x58, 0x64, 0x07, 0x9a, 0x3e, 0x3b, 0xa5, 0xc2, - 0x39, 0x63, 0x63, 0x1e, 0x52, 0x9f, 0x9b, 0xd5, 0x37, 0x5f, 0xe5, 0xee, 0x6c, 0xf2, 0x9c, 0x89, - 0x61, 0x48, 0xfd, 0xe4, 0x0b, 0x6d, 0xa4, 0x0c, 0x89, 0x71, 0xfc, 0x11, 0x2c, 0x67, 0x25, 0x8e, - 0x99, 0x2b, 0x28, 0x37, 0x6b, 0x2d, 0xbd, 0x8d, 0x49, 0x56, 0xf9, 0x1b, 0x85, 0xde, 0x4a, 0x54, - 0xda, 0xb8, 0x09, 0x2d, 0xbd, 0x8d, 0xf2, 0x44, 0x25, 0x4c, 0x3e, 0x6f, 0xcd, 0x30, 0xe0, 0xce, - 0x82, 0xa8, 0xa5, 0xff, 0x16, 0x95, 0x32, 0x32, 0x51, 0x59, 0x89, 0x44, 0x54, 0x3d, 0x16, 0x95, - 0xc2, 0xb9, 0xa8, 0x2c, 0x31, 0x11, 0xd5, 0x88, 0x45, 0xa5, 0x70, 0x22, 0x6a, 0x0b, 0x20, 0x62, - 0x9c, 0x89, 0xf1, 0x54, 0xde, 0x7c, 0x53, 0x3d, 0x02, 0xf7, 0xff, 0xe5, 0x19, 0xdb, 0x20, 0x32, - 0x6b, 0xd7, 0xf1, 0x05, 0xa9, 0x45, 0xe9, 0xf2, 0x8e, 0xff, 0x96, 0xef, 0xfa, 0xef, 0x21, 0xd4, - 0x32, 0xea, 
0xed, 0xef, 0xb9, 0x02, 0xfa, 0xb3, 0xde, 0xd0, 0x40, 0xb8, 0x0c, 0x85, 0xc1, 0xbe, - 0x51, 0xc8, 0xbf, 0x69, 0x7d, 0xb5, 0xf8, 0xf3, 0x6f, 0x16, 0xea, 0x56, 0xa0, 0xa4, 0xc4, 0x77, - 0xeb, 0x00, 0xf9, 0xec, 0xed, 0x2d, 0x80, 0xfc, 0xa2, 0xa4, 0xfd, 0x82, 0x93, 0x13, 0xce, 0x62, - 0x3f, 0xdf, 0x23, 0xc9, 0x4e, 0xe2, 0x2e, 0xf3, 0x4f, 0xc5, 0x54, 0xd9, 0xb8, 0x41, 0x92, 0x5d, - 0xf7, 0xeb, 0xcb, 0x6b, 0x4b, 0x7b, 0x79, 0x6d, 0x69, 0xaf, 0xaf, 0x2d, 0xf4, 0xe3, 0xdc, 0x42, - 0xbf, 0xcf, 0x2d, 0x74, 0x31, 0xb7, 0xd0, 0xe5, 0xdc, 0x42, 0x7f, 0xcd, 0x2d, 0xf4, 0x6a, 0x6e, - 0x69, 0xaf, 0xe7, 0x16, 0xfa, 0xe5, 0xc6, 0xd2, 0x2e, 0x6f, 0x2c, 0xed, 0xe5, 0x8d, 0xa5, 0x7d, - 0x97, 0xfd, 0x14, 0x1c, 0x95, 0xd5, 0x5f, 0xc0, 0xe7, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x4b, - 0xb6, 0xdb, 0xd4, 0x35, 0x08, 0x00, 0x00, -} - -func (x WriteRequest_SourceEnum) String() string { - s, ok := WriteRequest_SourceEnum_name[int32(x)] + // 1256 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4d, 0x6f, 0x1b, 0x45, + 0x18, 0xf6, 0x78, 0xfd, 0xb5, 0xaf, 0x3f, 0xb2, 0x1d, 0xa2, 0x62, 0xa2, 0x76, 0xe3, 0xae, 0x04, + 0x58, 0xa5, 0x0a, 0xc8, 0x88, 0x0a, 0xaa, 0x0a, 0xc9, 0x2e, 0x6e, 0x13, 0xda, 0x38, 0xd1, 0xd8, + 0x49, 0x55, 0x2e, 0xd6, 0xc6, 0x19, 0xc7, 0xab, 0xee, 0x17, 0x3b, 0xe3, 0xaa, 0xe1, 0xc4, 0x09, + 0xf5, 0xd8, 0xff, 0xc0, 0x85, 0xff, 0xc0, 0x1f, 0xe8, 0x31, 0xc7, 0x8a, 0x43, 0x45, 0x9d, 0x4b, + 0x8f, 0x15, 0xe2, 0x07, 0xa0, 0x99, 0xfd, 0x74, 0x52, 0xa9, 0x14, 0x7a, 0x9b, 0x79, 0xde, 0xf7, + 0x9d, 0x79, 0xde, 0x8f, 0x67, 0x76, 0xa1, 0x36, 0xf1, 0x02, 0x4e, 0x1f, 0x6f, 0xf8, 0x81, 0xc7, + 0x3d, 0x5c, 0x09, 0x77, 0xfe, 0xc1, 0xda, 0xea, 0x91, 0x77, 0xe4, 0x49, 0xf0, 0x73, 0xb1, 0x0a, + 0xed, 0xc6, 0x5f, 0x08, 0x1a, 0xf7, 0x03, 0x8b, 0x53, 0x42, 0x7f, 0x9c, 0x53, 0xc6, 0xf7, 0x3b, + 0xf8, 0x1a, 0x94, 0x86, 0xde, 0x3c, 0x98, 0xd0, 0xa6, 0xd2, 0x42, 0xed, 0x46, 0x67, 0x75, 0x23, + 0x3e, 0x63, 0x23, 0xc4, 0xfb, 0xee, 0xdc, 0x21, 0x91, 0x0f, 0x6e, 0x42, 0x99, 0x1d, 0x3b, 0x07, + 0x9e, 0xcd, 0x9a, 0x85, 0x96, 0xd2, 0x56, 0x49, 0xbc, 0xc5, 0x23, 0x00, 0x6e, 0x39, 0x94, 0xd1, + 0xc0, 0xa2, 0xac, 0x59, 0x6c, 0x29, 0xed, 0x6a, 0xe7, 0x62, 0x7a, 0xd6, 0xc8, 0x72, 0xe8, 0x50, + 0xda, 0xf6, 0x3b, 0xbd, 0x4b, 0xcf, 0x5e, 0xac, 0xe7, 0xfe, 0x78, 0xb1, 0xbe, 0xba, 0x1b, 0x50, + 0xd3, 0xb6, 0xbd, 0xc9, 0x28, 0x89, 0xdc, 0xef, 0x90, 0xcc, 0x39, 0xf8, 0x26, 0xac, 0xb1, 0x87, + 0x96, 0x3f, 0xb6, 0xcd, 0x03, 0x6a, 0x8f, 0x5d, 0xd3, 0xa1, 0xe3, 0x47, 0xa6, 0x6d, 0x1d, 0x9a, + 0xdc, 0xf2, 0xdc, 0xe6, 0xab, 0x72, 0x0b, 0xb5, 0x2b, 0xe4, 0x43, 0xe1, 0x72, 0x4f, 0x78, 0x0c, + 0x4c, 0x87, 0xee, 0x27, 0xf6, 0xef, 0x0b, 0x15, 0xa4, 0x29, 0x86, 0x05, 0x2b, 0x51, 0xce, 0xcc, + 0xf7, 0x5c, 0x46, 0xf7, 0x3b, 0x22, 0x8d, 0xa1, 0xe9, 0xf8, 0x36, 0x65, 0x4d, 0xd4, 0x42, 0x6d, + 0x85, 0xc4, 0x5b, 0xac, 0x03, 0x6c, 0x5a, 0x8c, 0x7b, 0x47, 0x81, 0xe9, 0xb0, 0x66, 0x5e, 0x1a, + 0x33, 0x08, 0xbe, 0x04, 0x6a, 0xff, 0x31, 0x75, 0x7c, 0xdb, 0x0c, 0x98, 0xac, 0x98, 0x42, 0x52, + 0xc0, 0xf8, 0x3d, 0x0f, 0xb5, 0x6c, 0xa6, 0x78, 0x1d, 0xaa, 0x92, 0x3a, 0x1b, 0x07, 0x74, 0x2a, + 0x2e, 0x53, 0xda, 0x75, 0x02, 0x21, 0x44, 0xe8, 0x94, 0xe1, 0x2f, 0xa0, 0xcc, 0x22, 0x26, 0x79, + 0x59, 0x33, 0x2d, 0x53, 0x7f, 0x69, 0xe8, 0x15, 0x44, 0xb5, 0x48, 0xec, 0x86, 0xbf, 0x01, 0x98, + 0xa5, 0x0c, 0x15, 0x19, 0xf4, 0x41, 0x1a, 0x94, 0x70, 0x8d, 0xe2, 0x32, 0xce, 0xf8, 0x6b, 0x50, + 0x69, 0x42, 0xbe, 0x20, 0x23, 0x33, 0xed, 0x8e, 0xd3, 0xd8, 0xef, 0x44, 0xa1, 0xa9, 0x33, 0xbe, + 0x0e, 0x15, 0x87, 0x72, 0xf3, 
0xd0, 0xe4, 0x66, 0xb3, 0xd8, 0x42, 0xcb, 0x81, 0xdb, 0x91, 0x25, + 0x09, 0x4c, 0x7c, 0xf1, 0x67, 0x70, 0x61, 0x12, 0x50, 0x93, 0xd3, 0xc3, 0xb1, 0xec, 0x2a, 0x37, + 0x1d, 0xbf, 0x59, 0x92, 0x65, 0xd3, 0x22, 0xc3, 0x28, 0xc6, 0x0d, 0x13, 0x20, 0xe5, 0xf0, 0xf6, + 0xd2, 0xad, 0x42, 0xf1, 0x91, 0x69, 0xcf, 0xa9, 0xec, 0x12, 0x22, 0xe1, 0x46, 0x34, 0x28, 0xbd, + 0x29, 0x6a, 0x50, 0x02, 0x18, 0x36, 0x40, 0xca, 0x16, 0xb7, 0xa1, 0xc0, 0x8f, 0x7d, 0x2a, 0x67, + 0xa0, 0x71, 0x26, 0xa3, 0xc0, 0x9a, 0x8c, 0x8e, 0x7d, 0x4a, 0xa4, 0x07, 0xfe, 0x08, 0x2a, 0x33, + 0x6a, 0xfb, 0x82, 0x8a, 0x3c, 0xb4, 0x4e, 0xca, 0x62, 0x4f, 0xe8, 0x54, 0x98, 0xe6, 0xae, 0xc5, + 0xa5, 0xa9, 0x10, 0x9a, 0xc4, 0x9e, 0xd0, 0xa9, 0xf1, 0x24, 0x0f, 0xb5, 0xac, 0xdc, 0xf0, 0xee, + 0x92, 0x48, 0xd0, 0xd9, 0x0e, 0xa4, 0xa3, 0xd3, 0x5b, 0x8b, 0x24, 0x82, 0xcf, 0x4b, 0x64, 0x49, + 0x20, 0xa9, 0x7c, 0xf3, 0xff, 0x42, 0xbe, 0x37, 0x32, 0x6d, 0x0c, 0x27, 0xa7, 0x79, 0x36, 0xe9, + 0xb8, 0x3c, 0xb2, 0x95, 0x28, 0xd3, 0xca, 0xff, 0x25, 0x45, 0x63, 0x05, 0xea, 0x4b, 0x22, 0x34, + 0xfe, 0x46, 0x00, 0x69, 0xbe, 0xb8, 0x0b, 0xa5, 0xb0, 0xb5, 0x51, 0x55, 0x32, 0x13, 0x2d, 0x8f, + 0xdb, 0x35, 0xad, 0xa0, 0xb7, 0x1a, 0x15, 0xa5, 0x26, 0xa1, 0xee, 0xa1, 0xe9, 0x73, 0x1a, 0x90, + 0x28, 0xf0, 0x3f, 0x48, 0xe9, 0x7a, 0x56, 0x0f, 0x61, 0x3d, 0xf0, 0x79, 0x3d, 0x9c, 0x57, 0xc3, + 0xb2, 0x04, 0x0b, 0xef, 0x20, 0x41, 0xe3, 0x2b, 0x50, 0x93, 0x7c, 0x30, 0x86, 0x82, 0xa8, 0xa3, + 0x9c, 0xbf, 0x1a, 0x91, 0xeb, 0xe5, 0xa9, 0xae, 0x45, 0x53, 0x6d, 0x74, 0xa1, 0x14, 0xa6, 0x90, + 0xda, 0x51, 0x76, 0xea, 0xaf, 0x40, 0x2d, 0x19, 0xf2, 0x71, 0xf2, 0x70, 0x55, 0x13, 0x6c, 0x9b, + 0x19, 0x4f, 0x11, 0x34, 0x96, 0x5b, 0xfc, 0x0e, 0xf3, 0x7f, 0x0d, 0xb0, 0x23, 0xb1, 0xf1, 0xd4, + 0x74, 0x2c, 0xfb, 0x58, 0xf6, 0x5f, 0xde, 0xa2, 0x12, 0x2d, 0xb4, 0xdc, 0x96, 0x06, 0xd1, 0x76, + 0x91, 0x97, 0x50, 0x87, 0x94, 0x83, 0x4a, 0xe4, 0x5a, 0x60, 0x42, 0x16, 0xf2, 0xf5, 0x50, 0x89, + 0x5c, 0x1b, 0x77, 0xa1, 0x14, 0xde, 0xf4, 0x1e, 0xda, 0x6f, 0xfc, 0x82, 0xa0, 0x12, 0xb7, 0xec, + 0x7d, 0x8c, 0xd3, 0x9b, 0x9f, 0x97, 0xb3, 0x85, 0x56, 0xce, 0x17, 0xfa, 0xa4, 0x08, 0x6a, 0x32, + 0x02, 0xf8, 0x32, 0xa8, 0x13, 0x6f, 0xee, 0xf2, 0xb1, 0xe5, 0x72, 0x59, 0xe8, 0xc2, 0x66, 0x8e, + 0x54, 0x24, 0xb4, 0xe5, 0x72, 0x7c, 0x05, 0xaa, 0xa1, 0x79, 0x6a, 0x7b, 0x26, 0x0f, 0xef, 0xda, + 0xcc, 0x11, 0x90, 0xe0, 0x6d, 0x81, 0x61, 0x0d, 0x14, 0x36, 0x77, 0xe4, 0x4d, 0x88, 0x88, 0x25, + 0xbe, 0x08, 0x25, 0x36, 0x99, 0x51, 0xc7, 0x94, 0x15, 0xbe, 0x40, 0xa2, 0x1d, 0xfe, 0x18, 0x1a, + 0x3f, 0xd1, 0xc0, 0x1b, 0xf3, 0x59, 0x40, 0xd9, 0xcc, 0xb3, 0x0f, 0x65, 0xb5, 0x11, 0xa9, 0x0b, + 0x74, 0x14, 0x83, 0xf8, 0x93, 0xc8, 0x2d, 0xe5, 0x55, 0x92, 0xbc, 0x10, 0xa9, 0x09, 0xfc, 0x56, + 0xcc, 0xed, 0x2a, 0x68, 0x19, 0xbf, 0x90, 0x60, 0x59, 0x12, 0x44, 0xa4, 0x91, 0x78, 0x86, 0x24, + 0xbb, 0xd0, 0x70, 0xe9, 0x91, 0xc9, 0xad, 0x47, 0x74, 0xcc, 0x7c, 0xd3, 0x65, 0xcd, 0xca, 0xd9, + 0xd7, 0xad, 0x37, 0x9f, 0x3c, 0xa4, 0x7c, 0xe8, 0x9b, 0x6e, 0xa4, 0x8b, 0x7a, 0x1c, 0x21, 0x30, + 0x86, 0x3f, 0x85, 0x95, 0xe4, 0x88, 0x43, 0x6a, 0x73, 0x93, 0x35, 0xd5, 0x96, 0xd2, 0xc6, 0x24, + 0x39, 0xf9, 0x3b, 0x89, 0x2e, 0x39, 0x4a, 0x6e, 0xac, 0x09, 0x2d, 0xa5, 0x8d, 0x52, 0x47, 0x49, + 0x4c, 0x3c, 0x2a, 0x0d, 0xdf, 0x63, 0x56, 0x86, 0x54, 0xf5, 0xed, 0xa4, 0xe2, 0x88, 0x84, 0x54, + 0x72, 0x44, 0x44, 0xaa, 0x16, 0x92, 0x8a, 0xe1, 0x94, 0x54, 0xe2, 0x18, 0x91, 0xaa, 0x87, 0xa4, + 0x62, 0x38, 0x22, 0x75, 0x13, 0x20, 0xa0, 0x8c, 0xf2, 0xf1, 0x4c, 0x54, 0xbe, 0x21, 0xa5, 0x77, + 0xf9, 0x0d, 0x8f, 0xc7, 0x06, 0x11, 0x5e, 0x9b, 0x96, 
0xcb, 0x89, 0x1a, 0xc4, 0xcb, 0x73, 0xf3, + 0xb7, 0x72, 0x7e, 0xfe, 0x6e, 0x80, 0x9a, 0x84, 0xe2, 0x2a, 0x94, 0xf7, 0x06, 0x77, 0x07, 0x3b, + 0xf7, 0x07, 0x5a, 0x0e, 0x97, 0x41, 0x79, 0xd0, 0x1f, 0x6a, 0x08, 0x97, 0x20, 0x3f, 0xd8, 0xd1, + 0xf2, 0x58, 0x85, 0xe2, 0x9d, 0xee, 0xde, 0x9d, 0xbe, 0xa6, 0xac, 0x15, 0x9e, 0xfc, 0xaa, 0xa3, + 0x5e, 0x19, 0x8a, 0x92, 0x7c, 0xaf, 0x06, 0x90, 0xf6, 0xde, 0xb8, 0x09, 0x90, 0x16, 0x4a, 0x8c, + 0x9f, 0x37, 0x9d, 0x32, 0x1a, 0xce, 0xf3, 0x05, 0x12, 0xed, 0x04, 0x6e, 0x53, 0xf7, 0x88, 0xcf, + 0xe4, 0x18, 0xd7, 0x49, 0xb4, 0xbb, 0xba, 0x0e, 0x90, 0x7e, 0x8b, 0x04, 0x89, 0xee, 0xee, 0x96, + 0x96, 0xc3, 0x15, 0x28, 0x90, 0xbd, 0x7b, 0x7d, 0x0d, 0x5d, 0x3d, 0x96, 0x5f, 0xe5, 0xe8, 0xc5, + 0x59, 0xa6, 0x5c, 0x85, 0xf2, 0xad, 0x9d, 0xbd, 0xc1, 0xa8, 0x4f, 0x34, 0x94, 0xd2, 0xcd, 0xe3, + 0x3a, 0xa8, 0x9b, 0x5b, 0xc3, 0xd1, 0xce, 0x1d, 0xd2, 0xdd, 0xd6, 0x14, 0x8c, 0xa1, 0x21, 0x2d, + 0x29, 0x56, 0x10, 0xa1, 0xc3, 0xbd, 0xed, 0xed, 0x2e, 0x79, 0xa0, 0x15, 0xc5, 0x65, 0x5b, 0x83, + 0xdb, 0x3b, 0x5a, 0x09, 0xd7, 0xa0, 0x32, 0x1c, 0x75, 0x47, 0xfd, 0x61, 0x7f, 0xa4, 0x95, 0x7b, + 0xdf, 0x9e, 0xbc, 0xd4, 0x73, 0xcf, 0x5f, 0xea, 0xb9, 0xd7, 0x2f, 0x75, 0xf4, 0xf3, 0x42, 0x47, + 0xbf, 0x2d, 0x74, 0xf4, 0x6c, 0xa1, 0xa3, 0x93, 0x85, 0x8e, 0xfe, 0x5c, 0xe8, 0xe8, 0xd5, 0x42, + 0xcf, 0xbd, 0x5e, 0xe8, 0xe8, 0xe9, 0xa9, 0x9e, 0x3b, 0x39, 0xd5, 0x73, 0xcf, 0x4f, 0xf5, 0xdc, + 0x0f, 0xc9, 0x7f, 0xf6, 0x41, 0x49, 0xfe, 0x58, 0x7f, 0xf9, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x2f, 0x9f, 0x19, 0xc5, 0x88, 0x0b, 0x00, 0x00, +} + +func (x SourceEnum) String() string { + s, ok := SourceEnum_name[int32(x)] if ok { return s } return strconv.Itoa(int(x)) } -func (x MetricMetadata_MetricType) String() string { - s, ok := MetricMetadata_MetricType_name[int32(x)] +func (x MetricType) String() string { + s, ok := MetricType_name[int32(x)] if ok { return s } @@ -931,14 +1317,14 @@ func (x Histogram_ResetHint) String() string { } return strconv.Itoa(int(x)) } -func (this *WriteRequest) Equal(that interface{}) bool { +func (this *WriteRequestV2) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*WriteRequest) + that1, ok := that.(*WriteRequestV2) if !ok { - that2, ok := that.(WriteRequest) + that2, ok := that.(WriteRequestV2) if ok { that1 = &that2 } else { @@ -950,22 +1336,22 @@ func (this *WriteRequest) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Timeseries) != len(that1.Timeseries) { + if this.Source != that1.Source { return false } - for i := range this.Timeseries { - if !this.Timeseries[i].Equal(that1.Timeseries[i]) { + if len(this.Symbols) != len(that1.Symbols) { + return false + } + for i := range this.Symbols { + if this.Symbols[i] != that1.Symbols[i] { return false } } - if this.Source != that1.Source { - return false - } - if len(this.Metadata) != len(that1.Metadata) { + if len(this.Timeseries) != len(that1.Timeseries) { return false } - for i := range this.Metadata { - if !this.Metadata[i].Equal(that1.Metadata[i]) { + for i := range this.Timeseries { + if !this.Timeseries[i].Equal(that1.Timeseries[i]) { return false } } @@ -974,14 +1360,14 @@ func (this *WriteRequest) Equal(that interface{}) bool { } return true } -func (this *WriteResponse) Equal(that interface{}) bool { +func (this *WriteResponseV2) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*WriteResponse) + that1, ok := that.(*WriteResponseV2) if !ok { - that2, ok := that.(WriteResponse) + that2, ok 
:= that.(WriteResponseV2) if ok { that1 = &that2 } else { @@ -993,8 +1379,205 @@ func (this *WriteResponse) Equal(that interface{}) bool { } else if this == nil { return false } - return true -} + if this.Samples != that1.Samples { + return false + } + if this.Histograms != that1.Histograms { + return false + } + if this.Exemplars != that1.Exemplars { + return false + } + return true +} +func (this *TimeSeriesV2) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TimeSeriesV2) + if !ok { + that2, ok := that.(TimeSeriesV2) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.LabelsRefs) != len(that1.LabelsRefs) { + return false + } + for i := range this.LabelsRefs { + if this.LabelsRefs[i] != that1.LabelsRefs[i] { + return false + } + } + if len(this.Samples) != len(that1.Samples) { + return false + } + for i := range this.Samples { + if !this.Samples[i].Equal(&that1.Samples[i]) { + return false + } + } + if len(this.Histograms) != len(that1.Histograms) { + return false + } + for i := range this.Histograms { + if !this.Histograms[i].Equal(&that1.Histograms[i]) { + return false + } + } + if len(this.Exemplars) != len(that1.Exemplars) { + return false + } + for i := range this.Exemplars { + if !this.Exemplars[i].Equal(&that1.Exemplars[i]) { + return false + } + } + if !this.Metadata.Equal(&that1.Metadata) { + return false + } + if this.CreatedTimestamp != that1.CreatedTimestamp { + return false + } + return true +} +func (this *ExemplarV2) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ExemplarV2) + if !ok { + that2, ok := that.(ExemplarV2) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.LabelsRefs) != len(that1.LabelsRefs) { + return false + } + for i := range this.LabelsRefs { + if this.LabelsRefs[i] != that1.LabelsRefs[i] { + return false + } + } + if this.Value != that1.Value { + return false + } + if this.Timestamp != that1.Timestamp { + return false + } + return true +} +func (this *MetadataV2) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*MetadataV2) + if !ok { + that2, ok := that.(MetadataV2) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if this.HelpRef != that1.HelpRef { + return false + } + if this.UnitRef != that1.UnitRef { + return false + } + return true +} +func (this *WriteRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*WriteRequest) + if !ok { + that2, ok := that.(WriteRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Timeseries) != len(that1.Timeseries) { + return false + } + for i := range this.Timeseries { + if !this.Timeseries[i].Equal(that1.Timeseries[i]) { + return false + } + } + if this.Source != that1.Source { + return false + } + if len(this.Metadata) != len(that1.Metadata) { + return false + } + for i := range this.Metadata { + if !this.Metadata[i].Equal(that1.Metadata[i]) { + return false + } + } + if this.SkipLabelNameValidation != that1.SkipLabelNameValidation { + 
return false + } + return true +} +func (this *WriteResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*WriteResponse) + if !ok { + that2, ok := that.(WriteResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} func (this *TimeSeries) Equal(that interface{}) bool { if that == nil { return this == nil @@ -1424,6 +2007,88 @@ func (this *BucketSpan) Equal(that interface{}) bool { } return true } +func (this *WriteRequestV2) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&cortexpb.WriteRequestV2{") + s = append(s, "Source: "+fmt.Sprintf("%#v", this.Source)+",\n") + s = append(s, "Symbols: "+fmt.Sprintf("%#v", this.Symbols)+",\n") + s = append(s, "Timeseries: "+fmt.Sprintf("%#v", this.Timeseries)+",\n") + s = append(s, "SkipLabelNameValidation: "+fmt.Sprintf("%#v", this.SkipLabelNameValidation)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *WriteResponseV2) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&cortexpb.WriteResponseV2{") + s = append(s, "Samples: "+fmt.Sprintf("%#v", this.Samples)+",\n") + s = append(s, "Histograms: "+fmt.Sprintf("%#v", this.Histograms)+",\n") + s = append(s, "Exemplars: "+fmt.Sprintf("%#v", this.Exemplars)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TimeSeriesV2) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&cortexpb.TimeSeriesV2{") + s = append(s, "LabelsRefs: "+fmt.Sprintf("%#v", this.LabelsRefs)+",\n") + if this.Samples != nil { + vs := make([]*Sample, len(this.Samples)) + for i := range vs { + vs[i] = &this.Samples[i] + } + s = append(s, "Samples: "+fmt.Sprintf("%#v", vs)+",\n") + } + if this.Histograms != nil { + vs := make([]*Histogram, len(this.Histograms)) + for i := range vs { + vs[i] = &this.Histograms[i] + } + s = append(s, "Histograms: "+fmt.Sprintf("%#v", vs)+",\n") + } + if this.Exemplars != nil { + vs := make([]*ExemplarV2, len(this.Exemplars)) + for i := range vs { + vs[i] = &this.Exemplars[i] + } + s = append(s, "Exemplars: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "Metadata: "+strings.Replace(this.Metadata.GoString(), `&`, ``, 1)+",\n") + s = append(s, "CreatedTimestamp: "+fmt.Sprintf("%#v", this.CreatedTimestamp)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ExemplarV2) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&cortexpb.ExemplarV2{") + s = append(s, "LabelsRefs: "+fmt.Sprintf("%#v", this.LabelsRefs)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MetadataV2) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&cortexpb.MetadataV2{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "HelpRef: "+fmt.Sprintf("%#v", this.HelpRef)+",\n") + s = append(s, "UnitRef: "+fmt.Sprintf("%#v", this.UnitRef)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} func (this *WriteRequest) GoString() string { if this == nil { return "nil" @@ -1625,7 +2290,7 @@ func valueToGoStringCortex(v interface{}, typ string) string { pv := 
reflect.Indirect(rv).Interface() return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) } -func (m *WriteRequest) Marshal() (dAtA []byte, err error) { +func (m *WriteRequestV2) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1635,12 +2300,12 @@ func (m *WriteRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *WriteRequestV2) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *WriteRequestV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -1657,26 +2322,315 @@ func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0xc0 } - if len(m.Metadata) > 0 { - for iNdEx := len(m.Metadata) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Timeseries) > 0 { + for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Metadata[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { + size := m.Timeseries[iNdEx].Size() + i -= size + if _, err := m.Timeseries[iNdEx].MarshalTo(dAtA[i:]); err != nil { return 0, err } - i -= size i = encodeVarintCortex(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x2a + } + } + if len(m.Symbols) > 0 { + for iNdEx := len(m.Symbols) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Symbols[iNdEx]) + copy(dAtA[i:], m.Symbols[iNdEx]) + i = encodeVarintCortex(dAtA, i, uint64(len(m.Symbols[iNdEx]))) + i-- + dAtA[i] = 0x22 } } if m.Source != 0 { i = encodeVarintCortex(dAtA, i, uint64(m.Source)) i-- - dAtA[i] = 0x10 + dAtA[i] = 0x18 } - if len(m.Timeseries) > 0 { + return len(dAtA) - i, nil +} + +func (m *WriteResponseV2) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WriteResponseV2) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WriteResponseV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Exemplars != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.Exemplars)) + i-- + dAtA[i] = 0x18 + } + if m.Histograms != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.Histograms)) + i-- + dAtA[i] = 0x10 + } + if m.Samples != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.Samples)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TimeSeriesV2) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeriesV2) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimeSeriesV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CreatedTimestamp != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.CreatedTimestamp)) + i-- + dAtA[i] = 0x30 + } + { + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Histograms) > 0 { + for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.LabelsRefs) > 0 { + dAtA3 := make([]byte, len(m.LabelsRefs)*10) + var j2 int + for _, num := range m.LabelsRefs { + for num >= 1<<7 { + dAtA3[j2] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j2++ + } + dAtA3[j2] = uint8(num) + j2++ + } + i -= j2 + copy(dAtA[i:], dAtA3[:j2]) + i = encodeVarintCortex(dAtA, i, uint64(j2)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExemplarV2) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExemplarV2) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExemplarV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Timestamp != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x18 + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if len(m.LabelsRefs) > 0 { + dAtA5 := make([]byte, len(m.LabelsRefs)*10) + var j4 int + for _, num := range m.LabelsRefs { + for num >= 1<<7 { + dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j4++ + } + dAtA5[j4] = uint8(num) + j4++ + } + i -= j4 + copy(dAtA[i:], dAtA5[:j4]) + i = encodeVarintCortex(dAtA, i, uint64(j4)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MetadataV2) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetadataV2) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetadataV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.UnitRef != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.UnitRef)) + i-- + dAtA[i] = 0x20 + } + if m.HelpRef != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.HelpRef)) + i-- + dAtA[i] = 0x18 + } + if m.Type != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *WriteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if 
m.SkipLabelNameValidation { + i-- + if m.SkipLabelNameValidation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x3e + i-- + dAtA[i] = 0xc0 + } + if len(m.Metadata) > 0 { + for iNdEx := len(m.Metadata) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metadata[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCortex(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Source != 0 { + i = encodeVarintCortex(dAtA, i, uint64(m.Source)) + i-- + dAtA[i] = 0x10 + } + if len(m.Timeseries) > 0 { for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { { size := m.Timeseries[iNdEx].Size() @@ -2032,30 +2986,30 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { } if len(m.PositiveCounts) > 0 { for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- { - f1 := math.Float64bits(float64(m.PositiveCounts[iNdEx])) + f6 := math.Float64bits(float64(m.PositiveCounts[iNdEx])) i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f6)) } i = encodeVarintCortex(dAtA, i, uint64(len(m.PositiveCounts)*8)) i-- dAtA[i] = 0x6a } if len(m.PositiveDeltas) > 0 { - var j2 int - dAtA4 := make([]byte, len(m.PositiveDeltas)*10) + var j7 int + dAtA9 := make([]byte, len(m.PositiveDeltas)*10) for _, num := range m.PositiveDeltas { - x3 := (uint64(num) << 1) ^ uint64((num >> 63)) - for x3 >= 1<<7 { - dAtA4[j2] = uint8(uint64(x3)&0x7f | 0x80) - j2++ - x3 >>= 7 - } - dAtA4[j2] = uint8(x3) - j2++ + x8 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x8 >= 1<<7 { + dAtA9[j7] = uint8(uint64(x8)&0x7f | 0x80) + j7++ + x8 >>= 7 + } + dAtA9[j7] = uint8(x8) + j7++ } - i -= j2 - copy(dAtA[i:], dAtA4[:j2]) - i = encodeVarintCortex(dAtA, i, uint64(j2)) + i -= j7 + copy(dAtA[i:], dAtA9[:j7]) + i = encodeVarintCortex(dAtA, i, uint64(j7)) i-- dAtA[i] = 0x62 } @@ -2075,30 +3029,30 @@ func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { } if len(m.NegativeCounts) > 0 { for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- { - f5 := math.Float64bits(float64(m.NegativeCounts[iNdEx])) + f10 := math.Float64bits(float64(m.NegativeCounts[iNdEx])) i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f5)) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f10)) } i = encodeVarintCortex(dAtA, i, uint64(len(m.NegativeCounts)*8)) i-- dAtA[i] = 0x52 } if len(m.NegativeDeltas) > 0 { - var j6 int - dAtA8 := make([]byte, len(m.NegativeDeltas)*10) + var j11 int + dAtA13 := make([]byte, len(m.NegativeDeltas)*10) for _, num := range m.NegativeDeltas { - x7 := (uint64(num) << 1) ^ uint64((num >> 63)) - for x7 >= 1<<7 { - dAtA8[j6] = uint8(uint64(x7)&0x7f | 0x80) - j6++ - x7 >>= 7 - } - dAtA8[j6] = uint8(x7) - j6++ - } - i -= j6 - copy(dAtA[i:], dAtA8[:j6]) - i = encodeVarintCortex(dAtA, i, uint64(j6)) + x12 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x12 >= 1<<7 { + dAtA13[j11] = uint8(uint64(x12)&0x7f | 0x80) + j11++ + x12 >>= 7 + } + dAtA13[j11] = uint8(x12) + j11++ + } + i -= j11 + copy(dAtA[i:], dAtA13[:j11]) + i = encodeVarintCortex(dAtA, i, uint64(j11)) i-- dAtA[i] = 0x4a } @@ -2244,23 +3198,23 @@ func encodeVarintCortex(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func (m *WriteRequest) Size() (n int) { +func (m *WriteRequestV2) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Timeseries) > 0 { - for _, e := range m.Timeseries { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) 
- } - } if m.Source != 0 { n += 1 + sovCortex(uint64(m.Source)) } - if len(m.Metadata) > 0 { - for _, e := range m.Metadata { + if len(m.Symbols) > 0 { + for _, s := range m.Symbols { + l = len(s) + n += 1 + l + sovCortex(uint64(l)) + } + } + if len(m.Timeseries) > 0 { + for _, e := range m.Timeseries { l = e.Size() n += 1 + l + sovCortex(uint64(l)) } @@ -2271,25 +3225,149 @@ func (m *WriteRequest) Size() (n int) { return n } -func (m *WriteResponse) Size() (n int) { +func (m *WriteResponseV2) Size() (n int) { if m == nil { return 0 } var l int _ = l + if m.Samples != 0 { + n += 1 + sovCortex(uint64(m.Samples)) + } + if m.Histograms != 0 { + n += 1 + sovCortex(uint64(m.Histograms)) + } + if m.Exemplars != 0 { + n += 1 + sovCortex(uint64(m.Exemplars)) + } return n } -func (m *TimeSeries) Size() (n int) { +func (m *TimeSeriesV2) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovCortex(uint64(l)) + if len(m.LabelsRefs) > 0 { + l = 0 + for _, e := range m.LabelsRefs { + l += sovCortex(uint64(e)) + } + n += 1 + sovCortex(uint64(l)) + l + } + if len(m.Samples) > 0 { + for _, e := range m.Samples { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + if len(m.Histograms) > 0 { + for _, e := range m.Histograms { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + if len(m.Exemplars) > 0 { + for _, e := range m.Exemplars { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + l = m.Metadata.Size() + n += 1 + l + sovCortex(uint64(l)) + if m.CreatedTimestamp != 0 { + n += 1 + sovCortex(uint64(m.CreatedTimestamp)) + } + return n +} + +func (m *ExemplarV2) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LabelsRefs) > 0 { + l = 0 + for _, e := range m.LabelsRefs { + l += sovCortex(uint64(e)) + } + n += 1 + sovCortex(uint64(l)) + l + } + if m.Value != 0 { + n += 9 + } + if m.Timestamp != 0 { + n += 1 + sovCortex(uint64(m.Timestamp)) + } + return n +} + +func (m *MetadataV2) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovCortex(uint64(m.Type)) + } + if m.HelpRef != 0 { + n += 1 + sovCortex(uint64(m.HelpRef)) + } + if m.UnitRef != 0 { + n += 1 + sovCortex(uint64(m.UnitRef)) + } + return n +} + +func (m *WriteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Timeseries) > 0 { + for _, e := range m.Timeseries { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + if m.Source != 0 { + n += 1 + sovCortex(uint64(m.Source)) + } + if len(m.Metadata) > 0 { + for _, e := range m.Metadata { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) + } + } + if m.SkipLabelNameValidation { + n += 3 + } + return n +} + +func (m *WriteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *TimeSeries) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovCortex(uint64(l)) } } if len(m.Samples) > 0 { @@ -2524,6 +3602,85 @@ func sovCortex(x uint64) (n int) { func sozCortex(x uint64) (n int) { return sovCortex(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *WriteRequestV2) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WriteRequestV2{`, + `Source:` + fmt.Sprintf("%v", this.Source) + `,`, + `Symbols:` + fmt.Sprintf("%v", this.Symbols) + `,`, + `Timeseries:` + fmt.Sprintf("%v", 
this.Timeseries) + `,`, + `SkipLabelNameValidation:` + fmt.Sprintf("%v", this.SkipLabelNameValidation) + `,`, + `}`, + }, "") + return s +} +func (this *WriteResponseV2) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WriteResponseV2{`, + `Samples:` + fmt.Sprintf("%v", this.Samples) + `,`, + `Histograms:` + fmt.Sprintf("%v", this.Histograms) + `,`, + `Exemplars:` + fmt.Sprintf("%v", this.Exemplars) + `,`, + `}`, + }, "") + return s +} +func (this *TimeSeriesV2) String() string { + if this == nil { + return "nil" + } + repeatedStringForSamples := "[]Sample{" + for _, f := range this.Samples { + repeatedStringForSamples += strings.Replace(strings.Replace(f.String(), "Sample", "Sample", 1), `&`, ``, 1) + "," + } + repeatedStringForSamples += "}" + repeatedStringForHistograms := "[]Histogram{" + for _, f := range this.Histograms { + repeatedStringForHistograms += strings.Replace(strings.Replace(f.String(), "Histogram", "Histogram", 1), `&`, ``, 1) + "," + } + repeatedStringForHistograms += "}" + repeatedStringForExemplars := "[]ExemplarV2{" + for _, f := range this.Exemplars { + repeatedStringForExemplars += strings.Replace(strings.Replace(f.String(), "ExemplarV2", "ExemplarV2", 1), `&`, ``, 1) + "," + } + repeatedStringForExemplars += "}" + s := strings.Join([]string{`&TimeSeriesV2{`, + `LabelsRefs:` + fmt.Sprintf("%v", this.LabelsRefs) + `,`, + `Samples:` + repeatedStringForSamples + `,`, + `Histograms:` + repeatedStringForHistograms + `,`, + `Exemplars:` + repeatedStringForExemplars + `,`, + `Metadata:` + strings.Replace(strings.Replace(this.Metadata.String(), "MetadataV2", "MetadataV2", 1), `&`, ``, 1) + `,`, + `CreatedTimestamp:` + fmt.Sprintf("%v", this.CreatedTimestamp) + `,`, + `}`, + }, "") + return s +} +func (this *ExemplarV2) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExemplarV2{`, + `LabelsRefs:` + fmt.Sprintf("%v", this.LabelsRefs) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, + `}`, + }, "") + return s +} +func (this *MetadataV2) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetadataV2{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `HelpRef:` + fmt.Sprintf("%v", this.HelpRef) + `,`, + `UnitRef:` + fmt.Sprintf("%v", this.UnitRef) + `,`, + `}`, + }, "") + return s +} func (this *WriteRequest) String() string { if this == nil { return "nil" @@ -2727,6 +3884,826 @@ func valueToStringCortex(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *WriteRequestV2) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WriteRequestV2: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WriteRequestV2: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + m.Source = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Source |= SourceEnum(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Symbols", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Symbols = append(m.Symbols, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Timeseries = append(m.Timeseries, PreallocTimeseriesV2{}) + if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 1000: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipLabelNameValidation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipLabelNameValidation = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WriteResponseV2) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WriteResponseV2: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WriteResponseV2: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + m.Samples = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Samples |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Histograms", 
wireType) + } + m.Histograms = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Histograms |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + m.Exemplars = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Exemplars |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeSeriesV2) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeSeriesV2: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeriesV2: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.LabelsRefs) == 0 { + m.LabelsRefs = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType) + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break 
+ } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Samples = append(m.Samples, Sample{}) + if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Histograms = append(m.Histograms, Histogram{}) + if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, ExemplarV2{}) + if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType) + } + m.CreatedTimestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedTimestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExemplarV2) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExemplarV2: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExemplarV2: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthCortex + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthCortex + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.LabelsRefs) == 0 { + m.LabelsRefs = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType) + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetadataV2) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetadataV2: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
MetadataV2: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= MetricType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HelpRef", wireType) + } + m.HelpRef = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HelpRef |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnitRef", wireType) + } + m.UnitRef = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnitRef |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCortex(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCortex + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *WriteRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -2804,7 +4781,7 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Source |= WriteRequest_SourceEnum(b&0x7F) << shift + m.Source |= SourceEnum(b&0x7F) << shift if b < 0x80 { break } @@ -3376,7 +5353,7 @@ func (m *MetricMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= MetricMetadata_MetricType(b&0x7F) << shift + m.Type |= MetricType(b&0x7F) << shift if b < 0x80 { break } diff --git a/pkg/cortexpb/cortex.proto b/pkg/cortexpb/cortex.proto index cedb173183..b16074c007 100644 --- a/pkg/cortexpb/cortex.proto +++ b/pkg/cortexpb/cortex.proto @@ -9,12 +9,110 @@ import "gogoproto/gogo.proto"; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; +enum SourceEnum { + API = 0; + RULE = 1; +} + +enum MetricType { + UNKNOWN = 0; + COUNTER = 1; + GAUGE = 2; + HISTOGRAM = 3; + GAUGEHISTOGRAM = 4; + SUMMARY = 5; + INFO = 6; + STATESET = 7; +} + +// https://github.com/prometheus/prometheus/blob/main/prompb/io/prometheus/write/v2/types.proto +message WriteRequestV2 { + reserved 1 to 2; + cortexpb.SourceEnum Source = 3; + repeated string symbols = 4; + repeated TimeSeriesV2 timeseries = 5 [(gogoproto.nullable) = false, (gogoproto.customtype) = "PreallocTimeseriesV2"]; + + bool skip_label_name_validation = 1000; // set intentionally high to keep WriteRequest compatible with upstream Prometheus +} + +message WriteResponseV2 { + // Samples represents X-Prometheus-Remote-Write-Written-Samples + int64 Samples = 1; + // Histograms represents X-Prometheus-Remote-Write-Written-Histograms + int64 Histograms = 2; + // Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars + int64 Exemplars = 3; +} + +message TimeSeriesV2 { + repeated uint32 labels_refs = 1; + // Timeseries messages can either specify samples or (native) histogram samples + // (histogram field), but not both. 
For a typical sender (real-time metric + // streaming), in healthy cases, there will be only one sample or histogram. + // + // Samples and histograms are sorted by timestamp (older first). + repeated Sample samples = 2 [(gogoproto.nullable) = false]; + repeated Histogram histograms = 3 [(gogoproto.nullable) = false]; + + // exemplars represents an optional set of exemplars attached to this series' samples. + repeated ExemplarV2 exemplars = 4 [(gogoproto.nullable) = false]; + + // metadata represents the metadata associated with the given series' samples. + MetadataV2 metadata = 5 [(gogoproto.nullable) = false]; + + // created_timestamp represents an optional created timestamp associated with + // this series' samples in ms format, typically for counter or histogram type + // metrics. Created timestamp represents the time when the counter started + // counting (sometimes referred to as start timestamp), which can increase + // the accuracy of query results. + // + // Note that some receivers might require this and in return fail to + // ingest such samples within the Request. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + // + // Note that the "optional" keyword is omitted due to + // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields + // Zero value means value not set. If you need to use exactly zero value for + // the timestamp, use 1 millisecond before or after. + int64 created_timestamp = 6; +} + +// Exemplar is an additional information attached to some series' samples. +// It is typically used to attach an example trace or request ID associated with +// the metric changes. +message ExemplarV2 { + // labels_refs is an optional list of label name-value pair references, encoded + // as indices to the Request.symbols array. This list's len is always + // a multiple of 2, and the underlying labels should be sorted lexicographically. + // If the exemplar references a trace it should use the `trace_id` label name, as a best practice. + repeated uint32 labels_refs = 1; + // value represents an exact example value. This can be useful when the exemplar + // is attached to a histogram, which only gives an estimated value through buckets. + double value = 2; + // timestamp represents the timestamp of the exemplar in ms. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + int64 timestamp = 3; +} + +// Metadata represents the metadata associated with the given series' samples. +message MetadataV2 { + MetricType type = 1; + // help_ref is a reference to the Request.symbols array representing help + // text for the metric. Help is optional, reference should point to an empty string in + // such a case. + uint32 help_ref = 3; + // unit_ref is a reference to the Request.symbols array representing a unit + // for the metric. Unit is optional, reference should point to an empty string in + // such a case. 
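+  // As an illustrative example (editorial note, not part of the upstream
+  // comment): with symbols = ["", "seconds", "Duration of HTTP requests."],
+  // a MetadataV2 for a histogram metric could carry type = HISTOGRAM,
+  // help_ref = 2 and unit_ref = 1. The first symbol is expected to be the
+  // empty string, so a help_ref or unit_ref of 0 means "not provided".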
+ uint32 unit_ref = 4; +} + message WriteRequest { repeated TimeSeries timeseries = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "PreallocTimeseries"]; - enum SourceEnum { - API = 0; - RULE = 1; - } SourceEnum Source = 2; repeated MetricMetadata metadata = 3 [(gogoproto.nullable) = true]; @@ -42,17 +140,6 @@ message Sample { } message MetricMetadata { - enum MetricType { - UNKNOWN = 0; - COUNTER = 1; - GAUGE = 2; - HISTOGRAM = 3; - GAUGEHISTOGRAM = 4; - SUMMARY = 5; - INFO = 6; - STATESET = 7; - } - MetricType type = 1; string metric_family_name = 2; string help = 4; diff --git a/pkg/cortexpbv2/timeseriesv2.go b/pkg/cortexpb/timeseriesv2.go similarity index 66% rename from pkg/cortexpbv2/timeseriesv2.go rename to pkg/cortexpb/timeseriesv2.go index 684e4bfac0..28eca0f973 100644 --- a/pkg/cortexpbv2/timeseriesv2.go +++ b/pkg/cortexpb/timeseriesv2.go @@ -1,59 +1,52 @@ -package cortexpbv2 +package cortexpb import ( "sync" - - "github.com/cortexproject/cortex/pkg/cortexpb" ) var ( - expectedTimeseries = 100 - expectedLabels = 20 - expectedSymbols = 20 - expectedSamplesPerSeries = 10 - expectedExemplarsPerSeries = 1 - expectedHistogramsPerSeries = 1 - - slicePool = sync.Pool{ + expectedSymbols = 20 + + slicePoolV2 = sync.Pool{ New: func() interface{} { return make([]PreallocTimeseriesV2, 0, expectedTimeseries) }, } - timeSeriesPool = sync.Pool{ + timeSeriesPoolV2 = sync.Pool{ New: func() interface{} { - return &TimeSeries{ + return &TimeSeriesV2{ LabelsRefs: make([]uint32, 0, expectedLabels), Samples: make([]Sample, 0, expectedSamplesPerSeries), Histograms: make([]Histogram, 0, expectedHistogramsPerSeries), - Exemplars: make([]Exemplar, 0, expectedExemplarsPerSeries), - Metadata: Metadata{}, + Exemplars: make([]ExemplarV2, 0, expectedExemplarsPerSeries), + Metadata: MetadataV2{}, } }, } - writeRequestPool = sync.Pool{ + writeRequestPoolV2 = sync.Pool{ New: func() interface{} { return &PreallocWriteRequestV2{ - WriteRequest: WriteRequest{ + WriteRequestV2: WriteRequestV2{ Symbols: make([]string, 0, expectedSymbols), }, } }, } - bytePool = cortexpb.NewSlicePool(20) + bytePoolV2 = NewSlicePool(20) ) // PreallocWriteRequestV2 is a WriteRequest which preallocs slices on Unmarshal. type PreallocWriteRequestV2 struct { - WriteRequest + WriteRequestV2 data *[]byte } // Unmarshal implements proto.Message. func (p *PreallocWriteRequestV2) Unmarshal(dAtA []byte) error { p.Timeseries = PreallocTimeseriesV2SliceFromPool() - return p.WriteRequest.Unmarshal(dAtA) + return p.WriteRequestV2.Unmarshal(dAtA) } func (p *PreallocWriteRequestV2) Marshal() (dAtA []byte, err error) { @@ -69,53 +62,53 @@ func (p *PreallocWriteRequestV2) Marshal() (dAtA []byte, err error) { // PreallocTimeseriesV2 is a TimeSeries which preallocs slices on Unmarshal. type PreallocTimeseriesV2 struct { - *TimeSeries + *TimeSeriesV2 } // Unmarshal implements proto.Message. 
func (p *PreallocTimeseriesV2) Unmarshal(dAtA []byte) error { - p.TimeSeries = TimeseriesV2FromPool() - return p.TimeSeries.Unmarshal(dAtA) + p.TimeSeriesV2 = TimeseriesV2FromPool() + return p.TimeSeriesV2.Unmarshal(dAtA) } func ReuseWriteRequestV2(req *PreallocWriteRequestV2) { if req.data != nil { - bytePool.ReuseSlice(req.data) + bytePoolV2.ReuseSlice(req.data) req.data = nil } req.Source = 0 req.Symbols = nil req.Timeseries = nil - writeRequestPool.Put(req) + writeRequestPoolV2.Put(req) } func PreallocWriteRequestV2FromPool() *PreallocWriteRequestV2 { - return writeRequestPool.Get().(*PreallocWriteRequestV2) + return writeRequestPoolV2.Get().(*PreallocWriteRequestV2) } // PreallocTimeseriesV2SliceFromPool retrieves a slice of PreallocTimeseriesV2 from a sync.Pool. // ReuseSlice should be called once done. func PreallocTimeseriesV2SliceFromPool() []PreallocTimeseriesV2 { - return slicePool.Get().([]PreallocTimeseriesV2) + return slicePoolV2.Get().([]PreallocTimeseriesV2) } // ReuseSlice puts the slice back into a sync.Pool for reuse. -func ReuseSlice(ts []PreallocTimeseriesV2) { +func ReuseSliceV2(ts []PreallocTimeseriesV2) { for i := range ts { - ReuseTimeseries(ts[i].TimeSeries) + ReuseTimeseriesV2(ts[i].TimeSeriesV2) } - slicePool.Put(ts[:0]) //nolint:staticcheck //see comment on slicePool for more details + slicePoolV2.Put(ts[:0]) //nolint:staticcheck //see comment on slicePool for more details } // TimeseriesV2FromPool retrieves a pointer to a TimeSeries from a sync.Pool. // ReuseTimeseries should be called once done, unless ReuseSlice was called on the slice that contains this TimeSeries. -func TimeseriesV2FromPool() *TimeSeries { - return timeSeriesPool.Get().(*TimeSeries) +func TimeseriesV2FromPool() *TimeSeriesV2 { + return timeSeriesPoolV2.Get().(*TimeSeriesV2) } // ReuseTimeseries puts the timeseries back into a sync.Pool for reuse. 
-func ReuseTimeseries(ts *TimeSeries) { +func ReuseTimeseriesV2(ts *TimeSeriesV2) { // clear ts lableRef and samples ts.LabelsRefs = ts.LabelsRefs[:0] ts.Samples = ts.Samples[:0] @@ -131,6 +124,6 @@ func ReuseTimeseries(ts *TimeSeries) { ts.Exemplars = ts.Exemplars[:0] ts.Histograms = ts.Histograms[:0] - ts.Metadata = Metadata{} - timeSeriesPool.Put(ts) + ts.Metadata = MetadataV2{} + timeSeriesPoolV2.Put(ts) } diff --git a/pkg/cortexpbv2/timeseriesv2_test.go b/pkg/cortexpb/timeseriesv2_test.go similarity index 82% rename from pkg/cortexpbv2/timeseriesv2_test.go rename to pkg/cortexpb/timeseriesv2_test.go index d10527564e..1a0c007f6f 100644 --- a/pkg/cortexpbv2/timeseriesv2_test.go +++ b/pkg/cortexpb/timeseriesv2_test.go @@ -1,4 +1,4 @@ -package cortexpbv2 +package cortexpb import ( "fmt" @@ -19,8 +19,8 @@ func TestPreallocTimeseriesV2SliceFromPool(t *testing.T) { t.Run("instance is cleaned before reusing", func(t *testing.T) { slice := PreallocTimeseriesV2SliceFromPool() - slice = append(slice, PreallocTimeseriesV2{TimeSeries: &TimeSeries{}}) - ReuseSlice(slice) + slice = append(slice, PreallocTimeseriesV2{TimeSeriesV2: &TimeSeriesV2{}}) + ReuseSliceV2(slice) reused := PreallocTimeseriesV2SliceFromPool() assert.Len(t, reused, 0) @@ -38,11 +38,11 @@ func TestTimeseriesV2FromPool(t *testing.T) { t.Run("instance is cleaned before reusing", func(t *testing.T) { ts := TimeseriesV2FromPool() ts.LabelsRefs = []uint32{1, 2} - ts.Samples = []Sample{{Value: 1, Timestamp: 2}} - ts.Exemplars = []Exemplar{{LabelsRefs: []uint32{1, 2}, Value: 1, Timestamp: 2}} + ts.Samples = []Sample{{Value: 1, TimestampMs: 2}} + ts.Exemplars = []ExemplarV2{{LabelsRefs: []uint32{1, 2}, Value: 1, Timestamp: 2}} ts.Histograms = []Histogram{{}} fmt.Println("ts.Histograms", len(ts.Histograms)) - ReuseTimeseries(ts) + ReuseTimeseriesV2(ts) reused := TimeseriesV2FromPool() assert.Len(t, reused.LabelsRefs, 0) @@ -52,13 +52,13 @@ func TestTimeseriesV2FromPool(t *testing.T) { }) } -func BenchmarkMarshallWriteRequest(b *testing.B) { +func BenchmarkMarshallWriteRequestV2(b *testing.B) { ts := PreallocTimeseriesV2SliceFromPool() for i := 0; i < 100; i++ { - ts = append(ts, PreallocTimeseriesV2{TimeSeries: TimeseriesV2FromPool()}) + ts = append(ts, PreallocTimeseriesV2{TimeSeriesV2: TimeseriesV2FromPool()}) ts[i].LabelsRefs = []uint32{1, 2, 3, 4, 5, 6, 7, 8} - ts[i].Samples = []Sample{{Value: 1, Timestamp: 2}} + ts[i].Samples = []Sample{{Value: 1, TimestampMs: 2}} } tests := []struct { @@ -69,7 +69,7 @@ func BenchmarkMarshallWriteRequest(b *testing.B) { { name: "no-pool", writeRequestFactory: func() proto.Marshaler { - return &WriteRequest{Timeseries: ts} + return &WriteRequestV2{Timeseries: ts} }, clean: func(in interface{}) {}, }, diff --git a/pkg/cortexpbv2/codecv2.go b/pkg/cortexpbv2/codecv2.go deleted file mode 100644 index 9fe7b434e0..0000000000 --- a/pkg/cortexpbv2/codecv2.go +++ /dev/null @@ -1,160 +0,0 @@ -package cortexpbv2 - -import ( - "github.com/prometheus/prometheus/model/histogram" - "github.com/prometheus/prometheus/model/labels" - - "github.com/cortexproject/cortex/pkg/cortexpb" -) - -// ToLabels return model labels.Labels from timeseries' remote labels. -func (t TimeSeries) ToLabels(b *labels.ScratchBuilder, symbols []string) labels.Labels { - return desymbolizeLabels(b, t.GetLabelsRefs(), symbols) -} - -// ToLabels return model labels.Labels from exemplar remote labels. 
-func (e Exemplar) ToLabels(b *labels.ScratchBuilder, symbols []string) labels.Labels { - return desymbolizeLabels(b, e.GetLabelsRefs(), symbols) -} - -func (m Metadata) ToV1Metadata(name string, symbols []string) *cortexpb.MetricMetadata { - typ := cortexpb.UNKNOWN - - switch m.Type { - case METRIC_TYPE_COUNTER: - typ = cortexpb.COUNTER - case METRIC_TYPE_GAUGE: - typ = cortexpb.GAUGE - case METRIC_TYPE_HISTOGRAM: - typ = cortexpb.HISTOGRAM - case METRIC_TYPE_GAUGEHISTOGRAM: - typ = cortexpb.GAUGEHISTOGRAM - case METRIC_TYPE_SUMMARY: - typ = cortexpb.SUMMARY - case METRIC_TYPE_INFO: - typ = cortexpb.INFO - case METRIC_TYPE_STATESET: - typ = cortexpb.STATESET - } - - return &cortexpb.MetricMetadata{ - Type: typ, - MetricFamilyName: name, - Unit: symbols[m.UnitRef], - Help: symbols[m.HelpRef], - } -} - -// desymbolizeLabels decodes label references, with given symbols to labels. -func desymbolizeLabels(b *labels.ScratchBuilder, labelRefs []uint32, symbols []string) labels.Labels { - b.Reset() - for i := 0; i < len(labelRefs); i += 2 { - b.Add(symbols[labelRefs[i]], symbols[labelRefs[i+1]]) - } - b.Sort() - return b.Labels() -} - -// IsFloatHistogram returns true if the histogram is float. -func (h Histogram) IsFloatHistogram() bool { - _, ok := h.GetCount().(*Histogram_CountFloat) - return ok -} - -// FloatHistogramProtoToFloatHistogram extracts a float Histogram from the provided proto message. -func FloatHistogramProtoToFloatHistogram(h Histogram) *histogram.FloatHistogram { - if !h.IsFloatHistogram() { - panic("FloatHistogramProtoToFloatHistogram called with an integer histogram") - } - - return &histogram.FloatHistogram{ - CounterResetHint: histogram.CounterResetHint(h.ResetHint), - Schema: h.Schema, - ZeroThreshold: h.ZeroThreshold, - ZeroCount: h.GetZeroCountFloat(), - Count: h.GetCountFloat(), - Sum: h.Sum, - PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), - PositiveBuckets: h.GetPositiveCounts(), - NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), - NegativeBuckets: h.GetNegativeCounts(), - CustomValues: h.GetCustomValues(), - } -} - -func HistogramProtoToHistogram(h Histogram) *histogram.Histogram { - if h.IsFloatHistogram() { - panic("HistogramProtoToHistogram called with a float histogram") - } - - return &histogram.Histogram{ - CounterResetHint: histogram.CounterResetHint(h.ResetHint), - Schema: h.Schema, - ZeroThreshold: h.ZeroThreshold, - ZeroCount: h.GetZeroCountInt(), - Count: h.GetCountInt(), - Sum: h.Sum, - PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), - PositiveBuckets: h.GetPositiveDeltas(), - NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), - NegativeBuckets: h.GetNegativeDeltas(), - CustomValues: h.GetCustomValues(), - } -} - -// HistogramToHistogramProto converts a (normal integer) Histogram to its protobuf message type. 
-// Changed from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L709-L723 -func HistogramToHistogramProto(timestamp int64, h *histogram.Histogram) Histogram { - return Histogram{ - Count: &Histogram_CountInt{CountInt: h.Count}, - Sum: h.Sum, - Schema: h.Schema, - ZeroThreshold: h.ZeroThreshold, - ZeroCount: &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount}, - NegativeSpans: spansToSpansProto(h.NegativeSpans), - NegativeDeltas: h.NegativeBuckets, - PositiveSpans: spansToSpansProto(h.PositiveSpans), - PositiveDeltas: h.PositiveBuckets, - ResetHint: Histogram_ResetHint(h.CounterResetHint), - Timestamp: timestamp, - CustomValues: h.CustomValues, - } -} - -// FloatHistogramToHistogramProto converts a float Histogram to a normal -// Histogram's protobuf message type. -// Changed from https://github.com/prometheus/prometheus/blob/0ab95536115adfe50af249d36d73674be694ca3f/storage/remote/codec.go#L725-L739 -func FloatHistogramToHistogramProto(timestamp int64, fh *histogram.FloatHistogram) Histogram { - return Histogram{ - Count: &Histogram_CountFloat{CountFloat: fh.Count}, - Sum: fh.Sum, - Schema: fh.Schema, - ZeroThreshold: fh.ZeroThreshold, - ZeroCount: &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount}, - NegativeSpans: spansToSpansProto(fh.NegativeSpans), - NegativeCounts: fh.NegativeBuckets, - PositiveSpans: spansToSpansProto(fh.PositiveSpans), - PositiveCounts: fh.PositiveBuckets, - ResetHint: Histogram_ResetHint(fh.CounterResetHint), - Timestamp: timestamp, - CustomValues: fh.CustomValues, - } -} - -func spansProtoToSpans(s []BucketSpan) []histogram.Span { - spans := make([]histogram.Span, len(s)) - for i := 0; i < len(s); i++ { - spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length} - } - - return spans -} - -func spansToSpansProto(s []histogram.Span) []BucketSpan { - spans := make([]BucketSpan, len(s)) - for i := 0; i < len(s); i++ { - spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} - } - - return spans -} diff --git a/pkg/cortexpbv2/cortexv2.pb.go b/pkg/cortexpbv2/cortexv2.pb.go deleted file mode 100644 index 456977d170..0000000000 --- a/pkg/cortexpbv2/cortexv2.pb.go +++ /dev/null @@ -1,4340 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: cortexv2.proto - -package cortexpbv2 - -import ( - encoding_binary "encoding/binary" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strconv "strconv" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type WriteRequest_SourceEnum int32 - -const ( - API WriteRequest_SourceEnum = 0 - RULE WriteRequest_SourceEnum = 1 -) - -var WriteRequest_SourceEnum_name = map[int32]string{ - 0: "API", - 1: "RULE", -} - -var WriteRequest_SourceEnum_value = map[string]int32{ - "API": 0, - "RULE": 1, -} - -func (WriteRequest_SourceEnum) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_affad2b75b7d03df, []int{0, 0} -} - -type Metadata_MetricType int32 - -const ( - METRIC_TYPE_UNSPECIFIED Metadata_MetricType = 0 - METRIC_TYPE_COUNTER Metadata_MetricType = 1 - METRIC_TYPE_GAUGE Metadata_MetricType = 2 - METRIC_TYPE_HISTOGRAM Metadata_MetricType = 3 - METRIC_TYPE_GAUGEHISTOGRAM Metadata_MetricType = 4 - METRIC_TYPE_SUMMARY Metadata_MetricType = 5 - METRIC_TYPE_INFO Metadata_MetricType = 6 - METRIC_TYPE_STATESET Metadata_MetricType = 7 -) - -var Metadata_MetricType_name = map[int32]string{ - 0: "METRIC_TYPE_UNSPECIFIED", - 1: "METRIC_TYPE_COUNTER", - 2: "METRIC_TYPE_GAUGE", - 3: "METRIC_TYPE_HISTOGRAM", - 4: "METRIC_TYPE_GAUGEHISTOGRAM", - 5: "METRIC_TYPE_SUMMARY", - 6: "METRIC_TYPE_INFO", - 7: "METRIC_TYPE_STATESET", -} - -var Metadata_MetricType_value = map[string]int32{ - "METRIC_TYPE_UNSPECIFIED": 0, - "METRIC_TYPE_COUNTER": 1, - "METRIC_TYPE_GAUGE": 2, - "METRIC_TYPE_HISTOGRAM": 3, - "METRIC_TYPE_GAUGEHISTOGRAM": 4, - "METRIC_TYPE_SUMMARY": 5, - "METRIC_TYPE_INFO": 6, - "METRIC_TYPE_STATESET": 7, -} - -func (Metadata_MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_affad2b75b7d03df, []int{5, 0} -} - -type Histogram_ResetHint int32 - -const ( - RESET_HINT_UNSPECIFIED Histogram_ResetHint = 0 - RESET_HINT_YES Histogram_ResetHint = 1 - RESET_HINT_NO Histogram_ResetHint = 2 - RESET_HINT_GAUGE Histogram_ResetHint = 3 -) - -var Histogram_ResetHint_name = map[int32]string{ - 0: "RESET_HINT_UNSPECIFIED", - 1: "RESET_HINT_YES", - 2: "RESET_HINT_NO", - 3: "RESET_HINT_GAUGE", -} - -var Histogram_ResetHint_value = map[string]int32{ - "RESET_HINT_UNSPECIFIED": 0, - "RESET_HINT_YES": 1, - "RESET_HINT_NO": 2, - "RESET_HINT_GAUGE": 3, -} - -func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_affad2b75b7d03df, []int{6, 0} -} - -// https://github.com/prometheus/prometheus/blob/main/prompb/io/prometheus/write/v2/types.proto -type WriteRequest struct { - Source WriteRequest_SourceEnum `protobuf:"varint,3,opt,name=Source,proto3,enum=cortexpbv2.WriteRequest_SourceEnum" json:"Source,omitempty"` - Symbols []string `protobuf:"bytes,4,rep,name=symbols,proto3" json:"symbols,omitempty"` - Timeseries []PreallocTimeseriesV2 `protobuf:"bytes,5,rep,name=timeseries,proto3,customtype=PreallocTimeseriesV2" json:"timeseries"` - SkipLabelNameValidation bool `protobuf:"varint,1000,opt,name=skip_label_name_validation,json=skipLabelNameValidation,proto3" json:"skip_label_name_validation,omitempty"` -} - -func (m *WriteRequest) Reset() { *m = WriteRequest{} } -func (*WriteRequest) ProtoMessage() {} -func (*WriteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_affad2b75b7d03df, []int{0} -} -func (m *WriteRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WriteRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m 
*WriteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WriteRequest.Merge(m, src) -} -func (m *WriteRequest) XXX_Size() int { - return m.Size() -} -func (m *WriteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WriteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_WriteRequest proto.InternalMessageInfo - -func (m *WriteRequest) GetSource() WriteRequest_SourceEnum { - if m != nil { - return m.Source - } - return API -} - -func (m *WriteRequest) GetSymbols() []string { - if m != nil { - return m.Symbols - } - return nil -} - -func (m *WriteRequest) GetSkipLabelNameValidation() bool { - if m != nil { - return m.SkipLabelNameValidation - } - return false -} - -type WriteResponse struct { - // Samples represents X-Prometheus-Remote-Write-Written-Samples - Samples int64 `protobuf:"varint,1,opt,name=Samples,proto3" json:"Samples,omitempty"` - // Histograms represents X-Prometheus-Remote-Write-Written-Histograms - Histograms int64 `protobuf:"varint,2,opt,name=Histograms,proto3" json:"Histograms,omitempty"` - // Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars - Exemplars int64 `protobuf:"varint,3,opt,name=Exemplars,proto3" json:"Exemplars,omitempty"` -} - -func (m *WriteResponse) Reset() { *m = WriteResponse{} } -func (*WriteResponse) ProtoMessage() {} -func (*WriteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_affad2b75b7d03df, []int{1} -} -func (m *WriteResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WriteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WriteResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WriteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WriteResponse.Merge(m, src) -} -func (m *WriteResponse) XXX_Size() int { - return m.Size() -} -func (m *WriteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WriteResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_WriteResponse proto.InternalMessageInfo - -func (m *WriteResponse) GetSamples() int64 { - if m != nil { - return m.Samples - } - return 0 -} - -func (m *WriteResponse) GetHistograms() int64 { - if m != nil { - return m.Histograms - } - return 0 -} - -func (m *WriteResponse) GetExemplars() int64 { - if m != nil { - return m.Exemplars - } - return 0 -} - -type TimeSeries struct { - LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"` - // Timeseries messages can either specify samples or (native) histogram samples - // (histogram field), but not both. For a typical sender (real-time metric - // streaming), in healthy cases, there will be only one sample or histogram. - // - // Samples and histograms are sorted by timestamp (older first). - Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` - Histograms []Histogram `protobuf:"bytes,3,rep,name=histograms,proto3" json:"histograms"` - // exemplars represents an optional set of exemplars attached to this series' samples. - Exemplars []Exemplar `protobuf:"bytes,4,rep,name=exemplars,proto3" json:"exemplars"` - // metadata represents the metadata associated with the given series' samples. 
- Metadata Metadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata"` - // created_timestamp represents an optional created timestamp associated with - // this series' samples in ms format, typically for counter or histogram type - // metrics. Created timestamp represents the time when the counter started - // counting (sometimes referred to as start timestamp), which can increase - // the accuracy of query results. - // - // Note that some receivers might require this and in return fail to - // ingest such samples within the Request. - // - // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go - // for conversion from/to time.Time to Prometheus timestamp. - // - // Note that the "optional" keyword is omitted due to - // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields - // Zero value means value not set. If you need to use exactly zero value for - // the timestamp, use 1 millisecond before or after. - CreatedTimestamp int64 `protobuf:"varint,6,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` -} - -func (m *TimeSeries) Reset() { *m = TimeSeries{} } -func (*TimeSeries) ProtoMessage() {} -func (*TimeSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_affad2b75b7d03df, []int{2} -} -func (m *TimeSeries) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TimeSeries) XXX_Merge(src proto.Message) { - xxx_messageInfo_TimeSeries.Merge(m, src) -} -func (m *TimeSeries) XXX_Size() int { - return m.Size() -} -func (m *TimeSeries) XXX_DiscardUnknown() { - xxx_messageInfo_TimeSeries.DiscardUnknown(m) -} - -var xxx_messageInfo_TimeSeries proto.InternalMessageInfo - -func (m *TimeSeries) GetLabelsRefs() []uint32 { - if m != nil { - return m.LabelsRefs - } - return nil -} - -func (m *TimeSeries) GetSamples() []Sample { - if m != nil { - return m.Samples - } - return nil -} - -func (m *TimeSeries) GetHistograms() []Histogram { - if m != nil { - return m.Histograms - } - return nil -} - -func (m *TimeSeries) GetExemplars() []Exemplar { - if m != nil { - return m.Exemplars - } - return nil -} - -func (m *TimeSeries) GetMetadata() Metadata { - if m != nil { - return m.Metadata - } - return Metadata{} -} - -func (m *TimeSeries) GetCreatedTimestamp() int64 { - if m != nil { - return m.CreatedTimestamp - } - return 0 -} - -// Exemplar is an additional information attached to some series' samples. -// It is typically used to attach an example trace or request ID associated with -// the metric changes. -type Exemplar struct { - // labels_refs is an optional list of label name-value pair references, encoded - // as indices to the Request.symbols array. This list's len is always - // a multiple of 2, and the underlying labels should be sorted lexicographically. - // If the exemplar references a trace it should use the `trace_id` label name, as a best practice. - LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"` - // value represents an exact example value. This can be useful when the exemplar - // is attached to a histogram, which only gives an estimated value through buckets. 
- Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` - // timestamp represents the timestamp of the exemplar in ms. - // - // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go - // for conversion from/to time.Time to Prometheus timestamp. - Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` -} - -func (m *Exemplar) Reset() { *m = Exemplar{} } -func (*Exemplar) ProtoMessage() {} -func (*Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_affad2b75b7d03df, []int{3} -} -func (m *Exemplar) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Exemplar) XXX_Merge(src proto.Message) { - xxx_messageInfo_Exemplar.Merge(m, src) -} -func (m *Exemplar) XXX_Size() int { - return m.Size() -} -func (m *Exemplar) XXX_DiscardUnknown() { - xxx_messageInfo_Exemplar.DiscardUnknown(m) -} - -var xxx_messageInfo_Exemplar proto.InternalMessageInfo - -func (m *Exemplar) GetLabelsRefs() []uint32 { - if m != nil { - return m.LabelsRefs - } - return nil -} - -func (m *Exemplar) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -func (m *Exemplar) GetTimestamp() int64 { - if m != nil { - return m.Timestamp - } - return 0 -} - -// Sample represents series sample. -type Sample struct { - // value of the sample. - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` - // timestamp represents timestamp of the sample in ms. - // - // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go - // for conversion from/to time.Time to Prometheus timestamp. - Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` -} - -func (m *Sample) Reset() { *m = Sample{} } -func (*Sample) ProtoMessage() {} -func (*Sample) Descriptor() ([]byte, []int) { - return fileDescriptor_affad2b75b7d03df, []int{4} -} -func (m *Sample) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Sample.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Sample) XXX_Merge(src proto.Message) { - xxx_messageInfo_Sample.Merge(m, src) -} -func (m *Sample) XXX_Size() int { - return m.Size() -} -func (m *Sample) XXX_DiscardUnknown() { - xxx_messageInfo_Sample.DiscardUnknown(m) -} - -var xxx_messageInfo_Sample proto.InternalMessageInfo - -func (m *Sample) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -func (m *Sample) GetTimestamp() int64 { - if m != nil { - return m.Timestamp - } - return 0 -} - -// Metadata represents the metadata associated with the given series' samples. -type Metadata struct { - Type Metadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=cortexpbv2.Metadata_MetricType" json:"type,omitempty"` - // help_ref is a reference to the Request.symbols array representing help - // text for the metric. Help is optional, reference should point to an empty string in - // such a case. 
- HelpRef uint32 `protobuf:"varint,3,opt,name=help_ref,json=helpRef,proto3" json:"help_ref,omitempty"` - // unit_ref is a reference to the Request.symbols array representing a unit - // for the metric. Unit is optional, reference should point to an empty string in - // such a case. - UnitRef uint32 `protobuf:"varint,4,opt,name=unit_ref,json=unitRef,proto3" json:"unit_ref,omitempty"` -} - -func (m *Metadata) Reset() { *m = Metadata{} } -func (*Metadata) ProtoMessage() {} -func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_affad2b75b7d03df, []int{5} -} -func (m *Metadata) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Metadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metadata.Merge(m, src) -} -func (m *Metadata) XXX_Size() int { - return m.Size() -} -func (m *Metadata) XXX_DiscardUnknown() { - xxx_messageInfo_Metadata.DiscardUnknown(m) -} - -var xxx_messageInfo_Metadata proto.InternalMessageInfo - -func (m *Metadata) GetType() Metadata_MetricType { - if m != nil { - return m.Type - } - return METRIC_TYPE_UNSPECIFIED -} - -func (m *Metadata) GetHelpRef() uint32 { - if m != nil { - return m.HelpRef - } - return 0 -} - -func (m *Metadata) GetUnitRef() uint32 { - if m != nil { - return m.UnitRef - } - return 0 -} - -// A native histogram, also known as a sparse histogram. -// Original design doc: -// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit -// The appendix of this design doc also explains the concept of float -// histograms. This Histogram message can represent both, the usual -// integer histogram as well as a float histogram. -type Histogram struct { - // Types that are valid to be assigned to Count: - // - // *Histogram_CountInt - // *Histogram_CountFloat - Count isHistogram_Count `protobuf_oneof:"count"` - Sum float64 `protobuf:"fixed64,3,opt,name=sum,proto3" json:"sum,omitempty"` - // The schema defines the bucket schema. Currently, valid numbers - // are -53 and numbers in range of -4 <= n <= 8. More valid numbers might be - // added in future for new bucketing layouts. - // - // The schema equal to -53 means custom buckets. See - // custom_values field description for more details. - // - // Values between -4 and 8 represent base-2 bucket schema, where 1 - // is a bucket boundary in each case, and then each power of two is - // divided into 2^n (n is schema value) logarithmic buckets. Or in other words, - // each bucket boundary is the previous boundary times 2^(2^-n). - Schema int32 `protobuf:"zigzag32,4,opt,name=schema,proto3" json:"schema,omitempty"` - ZeroThreshold float64 `protobuf:"fixed64,5,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` - // Types that are valid to be assigned to ZeroCount: - // - // *Histogram_ZeroCountInt - // *Histogram_ZeroCountFloat - ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"` - // Negative Buckets. - NegativeSpans []BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans"` - // Use either "negative_deltas" or "negative_counts", the former for - // regular histograms with integer counts, the latter for - // float histograms. 
- NegativeDeltas []int64 `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"` - NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"` - // Positive Buckets. - // - // In case of custom buckets (-53 schema value) the positive buckets are interpreted as follows: - // * The span offset+length points to an the index of the custom_values array - // or +Inf if pointing to the len of the array. - // * The counts and deltas have the same meaning as for exponential histograms. - PositiveSpans []BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans"` - // Use either "positive_deltas" or "positive_counts", the former for - // regular histograms with integer counts, the latter for - // float histograms. - PositiveDeltas []int64 `protobuf:"zigzag64,12,rep,packed,name=positive_deltas,json=positiveDeltas,proto3" json:"positive_deltas,omitempty"` - PositiveCounts []float64 `protobuf:"fixed64,13,rep,packed,name=positive_counts,json=positiveCounts,proto3" json:"positive_counts,omitempty"` - ResetHint Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=cortexpbv2.Histogram_ResetHint" json:"reset_hint,omitempty"` - // timestamp represents timestamp of the sample in ms. - // - // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go - // for conversion from/to time.Time to Prometheus timestamp. - Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // custom_values is an additional field used by non-exponential bucketing layouts. - // - // For custom buckets (-53 schema value) custom_values specify monotonically - // increasing upper inclusive boundaries for the bucket counts with arbitrary - // widths for this histogram. In other words, custom_values represents custom, - // explicit bucketing that could have been converted from the classic histograms. - // - // Those bounds are then referenced by spans in positive_spans with corresponding positive - // counts of deltas (refer to positive_spans for more details). This way we can - // have encode sparse histograms with custom bucketing (many buckets are often - // not used). - // - // Note that for custom bounds, even negative observations are placed in the positive - // counts to simplify the implementation and avoid ambiguity of where to place - // an underflow bucket, e.g. (-2, 1]. Therefore negative buckets and - // the zero bucket are unused, if the schema indicates custom bucketing. - // - // For each upper boundary the previous boundary represent the lower exclusive - // boundary for that bucket. The first element is the upper inclusive boundary - // for the first bucket, which implicitly has a lower inclusive bound of -Inf. - // This is similar to "le" label semantics on classic histograms. You may add a - // bucket with an upper bound of 0 to make sure that you really have no negative - // observations, but in practice, native histogram rendering will show both with - // or without first upper boundary 0 and no negative counts as the same case. - // - // The last element is not only the upper inclusive bound of the last regular - // bucket, but implicitly the lower exclusive bound of the +Inf bucket. 
- CustomValues []float64 `protobuf:"fixed64,16,rep,packed,name=custom_values,json=customValues,proto3" json:"custom_values,omitempty"` -} - -func (m *Histogram) Reset() { *m = Histogram{} } -func (*Histogram) ProtoMessage() {} -func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_affad2b75b7d03df, []int{6} -} -func (m *Histogram) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Histogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_Histogram.Merge(m, src) -} -func (m *Histogram) XXX_Size() int { - return m.Size() -} -func (m *Histogram) XXX_DiscardUnknown() { - xxx_messageInfo_Histogram.DiscardUnknown(m) -} - -var xxx_messageInfo_Histogram proto.InternalMessageInfo - -type isHistogram_Count interface { - isHistogram_Count() - Equal(interface{}) bool - MarshalTo([]byte) (int, error) - Size() int -} -type isHistogram_ZeroCount interface { - isHistogram_ZeroCount() - Equal(interface{}) bool - MarshalTo([]byte) (int, error) - Size() int -} - -type Histogram_CountInt struct { - CountInt uint64 `protobuf:"varint,1,opt,name=count_int,json=countInt,proto3,oneof"` -} -type Histogram_CountFloat struct { - CountFloat float64 `protobuf:"fixed64,2,opt,name=count_float,json=countFloat,proto3,oneof"` -} -type Histogram_ZeroCountInt struct { - ZeroCountInt uint64 `protobuf:"varint,6,opt,name=zero_count_int,json=zeroCountInt,proto3,oneof"` -} -type Histogram_ZeroCountFloat struct { - ZeroCountFloat float64 `protobuf:"fixed64,7,opt,name=zero_count_float,json=zeroCountFloat,proto3,oneof"` -} - -func (*Histogram_CountInt) isHistogram_Count() {} -func (*Histogram_CountFloat) isHistogram_Count() {} -func (*Histogram_ZeroCountInt) isHistogram_ZeroCount() {} -func (*Histogram_ZeroCountFloat) isHistogram_ZeroCount() {} - -func (m *Histogram) GetCount() isHistogram_Count { - if m != nil { - return m.Count - } - return nil -} -func (m *Histogram) GetZeroCount() isHistogram_ZeroCount { - if m != nil { - return m.ZeroCount - } - return nil -} - -func (m *Histogram) GetCountInt() uint64 { - if x, ok := m.GetCount().(*Histogram_CountInt); ok { - return x.CountInt - } - return 0 -} - -func (m *Histogram) GetCountFloat() float64 { - if x, ok := m.GetCount().(*Histogram_CountFloat); ok { - return x.CountFloat - } - return 0 -} - -func (m *Histogram) GetSum() float64 { - if m != nil { - return m.Sum - } - return 0 -} - -func (m *Histogram) GetSchema() int32 { - if m != nil { - return m.Schema - } - return 0 -} - -func (m *Histogram) GetZeroThreshold() float64 { - if m != nil { - return m.ZeroThreshold - } - return 0 -} - -func (m *Histogram) GetZeroCountInt() uint64 { - if x, ok := m.GetZeroCount().(*Histogram_ZeroCountInt); ok { - return x.ZeroCountInt - } - return 0 -} - -func (m *Histogram) GetZeroCountFloat() float64 { - if x, ok := m.GetZeroCount().(*Histogram_ZeroCountFloat); ok { - return x.ZeroCountFloat - } - return 0 -} - -func (m *Histogram) GetNegativeSpans() []BucketSpan { - if m != nil { - return m.NegativeSpans - } - return nil -} - -func (m *Histogram) GetNegativeDeltas() []int64 { - if m != nil { - return m.NegativeDeltas - } - return nil -} - -func (m *Histogram) GetNegativeCounts() []float64 { - if m != nil { - return m.NegativeCounts - } - return nil -} - -func (m 
*Histogram) GetPositiveSpans() []BucketSpan { - if m != nil { - return m.PositiveSpans - } - return nil -} - -func (m *Histogram) GetPositiveDeltas() []int64 { - if m != nil { - return m.PositiveDeltas - } - return nil -} - -func (m *Histogram) GetPositiveCounts() []float64 { - if m != nil { - return m.PositiveCounts - } - return nil -} - -func (m *Histogram) GetResetHint() Histogram_ResetHint { - if m != nil { - return m.ResetHint - } - return RESET_HINT_UNSPECIFIED -} - -func (m *Histogram) GetTimestamp() int64 { - if m != nil { - return m.Timestamp - } - return 0 -} - -func (m *Histogram) GetCustomValues() []float64 { - if m != nil { - return m.CustomValues - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Histogram) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Histogram_CountInt)(nil), - (*Histogram_CountFloat)(nil), - (*Histogram_ZeroCountInt)(nil), - (*Histogram_ZeroCountFloat)(nil), - } -} - -// A BucketSpan defines a number of consecutive buckets with their -// offset. Logically, it would be more straightforward to include the -// bucket counts in the Span. However, the protobuf representation is -// more compact in the way the data is structured here (with all the -// buckets in a single array separate from the Spans). -type BucketSpan struct { - Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` - Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` -} - -func (m *BucketSpan) Reset() { *m = BucketSpan{} } -func (*BucketSpan) ProtoMessage() {} -func (*BucketSpan) Descriptor() ([]byte, []int) { - return fileDescriptor_affad2b75b7d03df, []int{7} -} -func (m *BucketSpan) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BucketSpan) XXX_Merge(src proto.Message) { - xxx_messageInfo_BucketSpan.Merge(m, src) -} -func (m *BucketSpan) XXX_Size() int { - return m.Size() -} -func (m *BucketSpan) XXX_DiscardUnknown() { - xxx_messageInfo_BucketSpan.DiscardUnknown(m) -} - -var xxx_messageInfo_BucketSpan proto.InternalMessageInfo - -func (m *BucketSpan) GetOffset() int32 { - if m != nil { - return m.Offset - } - return 0 -} - -func (m *BucketSpan) GetLength() uint32 { - if m != nil { - return m.Length - } - return 0 -} - -func init() { - proto.RegisterEnum("cortexpbv2.WriteRequest_SourceEnum", WriteRequest_SourceEnum_name, WriteRequest_SourceEnum_value) - proto.RegisterEnum("cortexpbv2.Metadata_MetricType", Metadata_MetricType_name, Metadata_MetricType_value) - proto.RegisterEnum("cortexpbv2.Histogram_ResetHint", Histogram_ResetHint_name, Histogram_ResetHint_value) - proto.RegisterType((*WriteRequest)(nil), "cortexpbv2.WriteRequest") - proto.RegisterType((*WriteResponse)(nil), "cortexpbv2.WriteResponse") - proto.RegisterType((*TimeSeries)(nil), "cortexpbv2.TimeSeries") - proto.RegisterType((*Exemplar)(nil), "cortexpbv2.Exemplar") - proto.RegisterType((*Sample)(nil), "cortexpbv2.Sample") - proto.RegisterType((*Metadata)(nil), "cortexpbv2.Metadata") - proto.RegisterType((*Histogram)(nil), "cortexpbv2.Histogram") - proto.RegisterType((*BucketSpan)(nil), "cortexpbv2.BucketSpan") -} - -func init() { proto.RegisterFile("cortexv2.proto", 
fileDescriptor_affad2b75b7d03df) } - -var fileDescriptor_affad2b75b7d03df = []byte{ - // 1094 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0x4d, 0x6f, 0x1b, 0x45, - 0x18, 0xf6, 0x78, 0x1d, 0x7f, 0xbc, 0x89, 0xdd, 0xcd, 0xe0, 0xb6, 0xdb, 0x50, 0x36, 0xc6, 0x15, - 0x60, 0x81, 0x14, 0x24, 0x57, 0x42, 0x48, 0xad, 0x10, 0x71, 0xba, 0x89, 0x8d, 0x1a, 0x27, 0x1a, - 0x6f, 0x82, 0xc2, 0x65, 0xb5, 0xb1, 0xc7, 0xf6, 0xaa, 0xfb, 0xc5, 0xce, 0x38, 0x6a, 0x38, 0xf1, - 0x13, 0xf8, 0x09, 0x3d, 0xf2, 0x27, 0xb8, 0xf7, 0x98, 0x0b, 0x52, 0x85, 0x44, 0x45, 0x9c, 0x4b, - 0x8f, 0xfd, 0x09, 0x68, 0x67, 0x3f, 0x93, 0x80, 0xe0, 0x36, 0xef, 0xf3, 0x3e, 0xcf, 0xec, 0x33, - 0xcf, 0xcc, 0x6b, 0x19, 0x1a, 0x63, 0x2f, 0xe0, 0xf4, 0xe5, 0x59, 0x77, 0xcb, 0x0f, 0x3c, 0xee, - 0x61, 0x88, 0x6a, 0xff, 0xf4, 0xac, 0xbb, 0xd1, 0x9c, 0x79, 0x33, 0x4f, 0xc0, 0x5f, 0x86, 0xab, - 0x88, 0xd1, 0x7e, 0x55, 0x84, 0xb5, 0xef, 0x03, 0x8b, 0x53, 0x42, 0x7f, 0x5c, 0x50, 0xc6, 0xf1, - 0x13, 0x28, 0x8f, 0xbc, 0x45, 0x30, 0xa6, 0x8a, 0xd4, 0x42, 0x9d, 0x46, 0xf7, 0xd1, 0x56, 0xb6, - 0xc7, 0x56, 0x9e, 0xb9, 0x15, 0xd1, 0x34, 0x77, 0xe1, 0x90, 0x58, 0x82, 0x15, 0xa8, 0xb0, 0x73, - 0xe7, 0xd4, 0xb3, 0x99, 0x52, 0x6a, 0x49, 0x9d, 0x1a, 0x49, 0x4a, 0xac, 0x03, 0x70, 0xcb, 0xa1, - 0x8c, 0x06, 0x16, 0x65, 0xca, 0x4a, 0x4b, 0xea, 0xac, 0x76, 0xef, 0xe5, 0xb7, 0xd6, 0x2d, 0x87, - 0x8e, 0x44, 0xb7, 0xf7, 0xf0, 0xf5, 0xdb, 0xcd, 0xc2, 0x1f, 0x6f, 0x37, 0x9b, 0x87, 0x01, 0x35, - 0x6d, 0xdb, 0x1b, 0xeb, 0xa9, 0xf2, 0xb8, 0x4b, 0x72, 0xfb, 0xe0, 0xa7, 0xb0, 0xc1, 0x5e, 0x58, - 0xbe, 0x61, 0x9b, 0xa7, 0xd4, 0x36, 0x5c, 0xd3, 0xa1, 0xc6, 0x99, 0x69, 0x5b, 0x13, 0x93, 0x5b, - 0x9e, 0xab, 0xbc, 0xab, 0xb4, 0x50, 0xa7, 0x4a, 0xee, 0x87, 0x94, 0xe7, 0x21, 0x63, 0x68, 0x3a, - 0xf4, 0x38, 0xed, 0xb7, 0x37, 0x01, 0xb2, 0x33, 0xe0, 0x0a, 0x48, 0xdb, 0x87, 0x03, 0xb9, 0x80, - 0xab, 0x50, 0x22, 0x47, 0xcf, 0x35, 0x19, 0x7d, 0x57, 0xaa, 0x22, 0x59, 0x6a, 0xcf, 0xa0, 0x1e, - 0x9f, 0x9b, 0xf9, 0x9e, 0xcb, 0xc4, 0x29, 0x47, 0xa6, 0xe3, 0xdb, 0x94, 0x29, 0xa8, 0x85, 0x3a, - 0x12, 0x49, 0x4a, 0xac, 0x02, 0xf4, 0x2d, 0xc6, 0xbd, 0x59, 0x60, 0x3a, 0x4c, 0x29, 0x8a, 0x66, - 0x0e, 0xc1, 0x0f, 0xa1, 0xa6, 0xbd, 0xa4, 0x8e, 0x6f, 0x9b, 0x01, 0x13, 0xf9, 0x4a, 0x24, 0x03, - 0xda, 0xbf, 0x15, 0x01, 0xb2, 0x18, 0xf0, 0x26, 0xac, 0x8a, 0x73, 0x31, 0x23, 0xa0, 0xd3, 0xf0, - 0x53, 0x52, 0xa7, 0x4e, 0x20, 0x82, 0x08, 0x9d, 0x32, 0xdc, 0x85, 0x0a, 0x8b, 0x7d, 0x14, 0x45, - 0xa0, 0x38, 0x1f, 0x68, 0xe4, 0xa9, 0x57, 0x0a, 0xc3, 0x24, 0x09, 0x11, 0x3f, 0x01, 0x98, 0x67, - 0x0e, 0x25, 0x21, 0xbb, 0x9b, 0x97, 0xa5, 0x6e, 0x63, 0x65, 0x8e, 0x8e, 0xbf, 0x86, 0x1a, 0x4d, - 0xed, 0x97, 0x84, 0xb6, 0x99, 0xd7, 0x26, 0x47, 0x89, 0xa5, 0x19, 0x19, 0x7f, 0x05, 0x55, 0x87, - 0x72, 0x73, 0x62, 0x72, 0x53, 0x59, 0x69, 0xa1, 0x9b, 0xc2, 0xfd, 0xb8, 0x17, 0x0b, 0x53, 0x2e, - 0xfe, 0x02, 0xd6, 0xc7, 0x01, 0x35, 0x39, 0x9d, 0x18, 0xe2, 0xda, 0xb9, 0xe9, 0xf8, 0x4a, 0x59, - 0x04, 0x27, 0xc7, 0x0d, 0x3d, 0xc1, 0xdb, 0x06, 0x54, 0x13, 0x07, 0xff, 0x1d, 0x5e, 0x13, 0x56, - 0xce, 0x4c, 0x7b, 0x41, 0xc5, 0x2d, 0x21, 0x12, 0x15, 0xe1, 0x05, 0x65, 0xdf, 0x89, 0x2f, 0x28, - 0x05, 0xda, 0x4f, 0xa1, 0x1c, 0xa5, 0x9a, 0xa9, 0xd1, 0xbf, 0xaa, 0x8b, 0x37, 0xd5, 0xbf, 0x17, - 0xa1, 0x9a, 0x1c, 0x14, 0x3f, 0x86, 0x12, 0x3f, 0xf7, 0x23, 0x7d, 0xa3, 0xbb, 0xf9, 0x4f, 0x61, - 0x84, 0x8b, 0xc0, 0x1a, 0xeb, 0xe7, 0x3e, 0x25, 0x82, 0x8c, 0x1f, 0x40, 0x75, 0x4e, 0x6d, 0x3f, - 0x3c, 0x92, 0x30, 0x57, 0x27, 0x95, 0xb0, 0x26, 0x74, 0x1a, 0xb6, 0x16, 0xae, 0xc5, 
0x45, 0xab, - 0x14, 0xb5, 0xc2, 0x9a, 0xd0, 0x69, 0xfb, 0x4f, 0x04, 0x90, 0x6d, 0x85, 0x3f, 0x84, 0xfb, 0xfb, - 0x9a, 0x4e, 0x06, 0x3b, 0x86, 0x7e, 0x72, 0xa8, 0x19, 0x47, 0xc3, 0xd1, 0xa1, 0xb6, 0x33, 0xd8, - 0x1d, 0x68, 0xcf, 0xe4, 0x02, 0xbe, 0x0f, 0x1f, 0xe4, 0x9b, 0x3b, 0x07, 0x47, 0x43, 0x5d, 0x23, - 0x32, 0xc2, 0x77, 0x61, 0x3d, 0xdf, 0xd8, 0xdb, 0x3e, 0xda, 0xd3, 0xe4, 0x22, 0x7e, 0x00, 0x77, - 0xf3, 0x70, 0x7f, 0x30, 0xd2, 0x0f, 0xf6, 0xc8, 0xf6, 0xbe, 0x2c, 0x61, 0x15, 0x36, 0x6e, 0x29, - 0xb2, 0x7e, 0xe9, 0xe6, 0xa7, 0x46, 0x47, 0xfb, 0xfb, 0xdb, 0xe4, 0x44, 0x5e, 0xc1, 0x4d, 0x90, - 0xf3, 0x8d, 0xc1, 0x70, 0xf7, 0x40, 0x2e, 0x63, 0x05, 0x9a, 0xd7, 0xe8, 0xfa, 0xb6, 0xae, 0x8d, - 0x34, 0x5d, 0xae, 0xb4, 0x5f, 0x95, 0xa1, 0x96, 0xbe, 0x5a, 0xfc, 0x11, 0xd4, 0xc6, 0xde, 0xc2, - 0xe5, 0x86, 0xe5, 0x72, 0x91, 0x6e, 0xa9, 0x5f, 0x20, 0x55, 0x01, 0x0d, 0x5c, 0x8e, 0x3f, 0x86, - 0xd5, 0xa8, 0x3d, 0xb5, 0x3d, 0x93, 0x47, 0x97, 0xdf, 0x2f, 0x10, 0x10, 0xe0, 0x6e, 0x88, 0x61, - 0x19, 0x24, 0xb6, 0x70, 0x44, 0xc0, 0x88, 0x84, 0x4b, 0x7c, 0x0f, 0xca, 0x6c, 0x3c, 0xa7, 0x8e, - 0x29, 0xa2, 0x5d, 0x27, 0x71, 0x85, 0x3f, 0x81, 0xc6, 0x4f, 0x34, 0xf0, 0x0c, 0x3e, 0x0f, 0x28, - 0x9b, 0x7b, 0xf6, 0x44, 0xbc, 0x6d, 0x44, 0xea, 0x21, 0xaa, 0x27, 0x20, 0xfe, 0x34, 0xa6, 0x65, - 0xbe, 0xca, 0xc2, 0x17, 0x22, 0x6b, 0x21, 0xbe, 0x93, 0x78, 0xfb, 0x1c, 0xe4, 0x1c, 0x2f, 0x32, - 0x58, 0x11, 0x06, 0x11, 0x69, 0xa4, 0xcc, 0xc8, 0xe4, 0x0e, 0x34, 0x5c, 0x3a, 0x33, 0xb9, 0x75, - 0x46, 0x0d, 0xe6, 0x9b, 0x2e, 0x53, 0xaa, 0xb7, 0x7f, 0x53, 0x7b, 0x8b, 0xf1, 0x0b, 0xca, 0x47, - 0xbe, 0xe9, 0xc6, 0x83, 0x55, 0x4f, 0x34, 0x21, 0xc6, 0xf0, 0x67, 0x70, 0x27, 0xdd, 0x64, 0x42, - 0x6d, 0x6e, 0x32, 0xa5, 0xd6, 0x92, 0x3a, 0x98, 0xa4, 0x7b, 0x3f, 0x13, 0xe8, 0x35, 0xa2, 0x70, - 0xc7, 0x14, 0x68, 0x49, 0x1d, 0x94, 0x11, 0x85, 0x35, 0x16, 0xda, 0xf2, 0x3d, 0x66, 0xe5, 0x6c, - 0xad, 0xfe, 0x1f, 0x5b, 0x89, 0x26, 0xb5, 0x95, 0x6e, 0x12, 0xdb, 0x5a, 0x8b, 0x6c, 0x25, 0x70, - 0x66, 0x2b, 0x25, 0xc6, 0xb6, 0xea, 0x91, 0xad, 0x04, 0x8e, 0x6d, 0x7d, 0x03, 0x10, 0x50, 0x46, - 0xb9, 0x31, 0x0f, 0xd3, 0x6f, 0xdc, 0x9e, 0xb9, 0xf4, 0xfd, 0x6c, 0x91, 0x90, 0xd7, 0xb7, 0x5c, - 0x4e, 0x6a, 0x41, 0xb2, 0xbc, 0x3e, 0xd8, 0x77, 0x6e, 0x0c, 0x36, 0x7e, 0x04, 0xf5, 0xf1, 0x82, - 0x71, 0xcf, 0x31, 0xc4, 0xcf, 0x00, 0x53, 0x64, 0x61, 0x62, 0x2d, 0x02, 0x8f, 0x05, 0xd6, 0x9e, - 0x40, 0x2d, 0xdd, 0x1a, 0x6f, 0xc0, 0x3d, 0x12, 0xbe, 0x5e, 0xa3, 0x3f, 0x18, 0xea, 0x37, 0x46, - 0x10, 0x43, 0x23, 0xd7, 0x3b, 0xd1, 0x46, 0x32, 0xc2, 0xeb, 0x50, 0xcf, 0x61, 0xc3, 0x03, 0xb9, - 0x18, 0x4e, 0x49, 0x0e, 0x8a, 0xe6, 0x51, 0xea, 0x55, 0x60, 0x45, 0x04, 0xd1, 0x5b, 0x03, 0xc8, - 0xde, 0x52, 0xfb, 0x29, 0x40, 0x16, 0x7a, 0xf8, 0x9c, 0xbd, 0xe9, 0x94, 0xd1, 0x68, 0x3e, 0xd6, - 0x49, 0x5c, 0x85, 0xb8, 0x4d, 0xdd, 0x19, 0x9f, 0x8b, 0xb1, 0xa8, 0x93, 0xb8, 0xea, 0x7d, 0x7b, - 0x71, 0xa9, 0x16, 0xde, 0x5c, 0xaa, 0x85, 0xf7, 0x97, 0x2a, 0xfa, 0x79, 0xa9, 0xa2, 0x5f, 0x97, - 0x2a, 0x7a, 0xbd, 0x54, 0xd1, 0xc5, 0x52, 0x45, 0x7f, 0x2d, 0x55, 0xf4, 0x6e, 0xa9, 0x16, 0xde, - 0x2f, 0x55, 0xf4, 0xcb, 0x95, 0x5a, 0xb8, 0xb8, 0x52, 0x0b, 0x6f, 0xae, 0xd4, 0xc2, 0x0f, 0xb9, - 0xff, 0x1e, 0xa7, 0x65, 0xf1, 0x67, 0xe3, 0xf1, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x03, 0x9f, - 0x7c, 0x56, 0xa0, 0x08, 0x00, 0x00, -} - -func (x WriteRequest_SourceEnum) String() string { - s, ok := WriteRequest_SourceEnum_name[int32(x)] - if ok { - return s - } - return strconv.Itoa(int(x)) -} -func (x Metadata_MetricType) String() string { - s, ok := Metadata_MetricType_name[int32(x)] - if ok { - 
return s - } - return strconv.Itoa(int(x)) -} -func (x Histogram_ResetHint) String() string { - s, ok := Histogram_ResetHint_name[int32(x)] - if ok { - return s - } - return strconv.Itoa(int(x)) -} -func (this *WriteRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*WriteRequest) - if !ok { - that2, ok := that.(WriteRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Source != that1.Source { - return false - } - if len(this.Symbols) != len(that1.Symbols) { - return false - } - for i := range this.Symbols { - if this.Symbols[i] != that1.Symbols[i] { - return false - } - } - if len(this.Timeseries) != len(that1.Timeseries) { - return false - } - for i := range this.Timeseries { - if !this.Timeseries[i].Equal(that1.Timeseries[i]) { - return false - } - } - if this.SkipLabelNameValidation != that1.SkipLabelNameValidation { - return false - } - return true -} -func (this *WriteResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*WriteResponse) - if !ok { - that2, ok := that.(WriteResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Samples != that1.Samples { - return false - } - if this.Histograms != that1.Histograms { - return false - } - if this.Exemplars != that1.Exemplars { - return false - } - return true -} -func (this *TimeSeries) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*TimeSeries) - if !ok { - that2, ok := that.(TimeSeries) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.LabelsRefs) != len(that1.LabelsRefs) { - return false - } - for i := range this.LabelsRefs { - if this.LabelsRefs[i] != that1.LabelsRefs[i] { - return false - } - } - if len(this.Samples) != len(that1.Samples) { - return false - } - for i := range this.Samples { - if !this.Samples[i].Equal(&that1.Samples[i]) { - return false - } - } - if len(this.Histograms) != len(that1.Histograms) { - return false - } - for i := range this.Histograms { - if !this.Histograms[i].Equal(&that1.Histograms[i]) { - return false - } - } - if len(this.Exemplars) != len(that1.Exemplars) { - return false - } - for i := range this.Exemplars { - if !this.Exemplars[i].Equal(&that1.Exemplars[i]) { - return false - } - } - if !this.Metadata.Equal(&that1.Metadata) { - return false - } - if this.CreatedTimestamp != that1.CreatedTimestamp { - return false - } - return true -} -func (this *Exemplar) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Exemplar) - if !ok { - that2, ok := that.(Exemplar) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.LabelsRefs) != len(that1.LabelsRefs) { - return false - } - for i := range this.LabelsRefs { - if this.LabelsRefs[i] != that1.LabelsRefs[i] { - return false - } - } - if this.Value != that1.Value { - return false - } - if this.Timestamp != that1.Timestamp { - return false - } - return true -} -func (this *Sample) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Sample) - if !ok { - that2, ok := 
that.(Sample) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Value != that1.Value { - return false - } - if this.Timestamp != that1.Timestamp { - return false - } - return true -} -func (this *Metadata) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Metadata) - if !ok { - that2, ok := that.(Metadata) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Type != that1.Type { - return false - } - if this.HelpRef != that1.HelpRef { - return false - } - if this.UnitRef != that1.UnitRef { - return false - } - return true -} -func (this *Histogram) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Histogram) - if !ok { - that2, ok := that.(Histogram) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if that1.Count == nil { - if this.Count != nil { - return false - } - } else if this.Count == nil { - return false - } else if !this.Count.Equal(that1.Count) { - return false - } - if this.Sum != that1.Sum { - return false - } - if this.Schema != that1.Schema { - return false - } - if this.ZeroThreshold != that1.ZeroThreshold { - return false - } - if that1.ZeroCount == nil { - if this.ZeroCount != nil { - return false - } - } else if this.ZeroCount == nil { - return false - } else if !this.ZeroCount.Equal(that1.ZeroCount) { - return false - } - if len(this.NegativeSpans) != len(that1.NegativeSpans) { - return false - } - for i := range this.NegativeSpans { - if !this.NegativeSpans[i].Equal(&that1.NegativeSpans[i]) { - return false - } - } - if len(this.NegativeDeltas) != len(that1.NegativeDeltas) { - return false - } - for i := range this.NegativeDeltas { - if this.NegativeDeltas[i] != that1.NegativeDeltas[i] { - return false - } - } - if len(this.NegativeCounts) != len(that1.NegativeCounts) { - return false - } - for i := range this.NegativeCounts { - if this.NegativeCounts[i] != that1.NegativeCounts[i] { - return false - } - } - if len(this.PositiveSpans) != len(that1.PositiveSpans) { - return false - } - for i := range this.PositiveSpans { - if !this.PositiveSpans[i].Equal(&that1.PositiveSpans[i]) { - return false - } - } - if len(this.PositiveDeltas) != len(that1.PositiveDeltas) { - return false - } - for i := range this.PositiveDeltas { - if this.PositiveDeltas[i] != that1.PositiveDeltas[i] { - return false - } - } - if len(this.PositiveCounts) != len(that1.PositiveCounts) { - return false - } - for i := range this.PositiveCounts { - if this.PositiveCounts[i] != that1.PositiveCounts[i] { - return false - } - } - if this.ResetHint != that1.ResetHint { - return false - } - if this.Timestamp != that1.Timestamp { - return false - } - if len(this.CustomValues) != len(that1.CustomValues) { - return false - } - for i := range this.CustomValues { - if this.CustomValues[i] != that1.CustomValues[i] { - return false - } - } - return true -} -func (this *Histogram_CountInt) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Histogram_CountInt) - if !ok { - that2, ok := that.(Histogram_CountInt) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if 
this.CountInt != that1.CountInt { - return false - } - return true -} -func (this *Histogram_CountFloat) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Histogram_CountFloat) - if !ok { - that2, ok := that.(Histogram_CountFloat) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.CountFloat != that1.CountFloat { - return false - } - return true -} -func (this *Histogram_ZeroCountInt) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Histogram_ZeroCountInt) - if !ok { - that2, ok := that.(Histogram_ZeroCountInt) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ZeroCountInt != that1.ZeroCountInt { - return false - } - return true -} -func (this *Histogram_ZeroCountFloat) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Histogram_ZeroCountFloat) - if !ok { - that2, ok := that.(Histogram_ZeroCountFloat) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ZeroCountFloat != that1.ZeroCountFloat { - return false - } - return true -} -func (this *BucketSpan) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*BucketSpan) - if !ok { - that2, ok := that.(BucketSpan) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Offset != that1.Offset { - return false - } - if this.Length != that1.Length { - return false - } - return true -} -func (this *WriteRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&cortexpbv2.WriteRequest{") - s = append(s, "Source: "+fmt.Sprintf("%#v", this.Source)+",\n") - s = append(s, "Symbols: "+fmt.Sprintf("%#v", this.Symbols)+",\n") - s = append(s, "Timeseries: "+fmt.Sprintf("%#v", this.Timeseries)+",\n") - s = append(s, "SkipLabelNameValidation: "+fmt.Sprintf("%#v", this.SkipLabelNameValidation)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *WriteResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&cortexpbv2.WriteResponse{") - s = append(s, "Samples: "+fmt.Sprintf("%#v", this.Samples)+",\n") - s = append(s, "Histograms: "+fmt.Sprintf("%#v", this.Histograms)+",\n") - s = append(s, "Exemplars: "+fmt.Sprintf("%#v", this.Exemplars)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *TimeSeries) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 10) - s = append(s, "&cortexpbv2.TimeSeries{") - s = append(s, "LabelsRefs: "+fmt.Sprintf("%#v", this.LabelsRefs)+",\n") - if this.Samples != nil { - vs := make([]*Sample, len(this.Samples)) - for i := range vs { - vs[i] = &this.Samples[i] - } - s = append(s, "Samples: "+fmt.Sprintf("%#v", vs)+",\n") - } - if this.Histograms != nil { - vs := make([]*Histogram, len(this.Histograms)) - for i := range vs { - vs[i] = &this.Histograms[i] - } - s = append(s, "Histograms: "+fmt.Sprintf("%#v", vs)+",\n") - } - if this.Exemplars != nil { - vs := make([]*Exemplar, len(this.Exemplars)) - for i := range vs { - vs[i] = &this.Exemplars[i] - } - s 
= append(s, "Exemplars: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "Metadata: "+strings.Replace(this.Metadata.GoString(), `&`, ``, 1)+",\n") - s = append(s, "CreatedTimestamp: "+fmt.Sprintf("%#v", this.CreatedTimestamp)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Exemplar) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&cortexpbv2.Exemplar{") - s = append(s, "LabelsRefs: "+fmt.Sprintf("%#v", this.LabelsRefs)+",\n") - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Sample) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&cortexpbv2.Sample{") - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Metadata) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&cortexpbv2.Metadata{") - s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") - s = append(s, "HelpRef: "+fmt.Sprintf("%#v", this.HelpRef)+",\n") - s = append(s, "UnitRef: "+fmt.Sprintf("%#v", this.UnitRef)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Histogram) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 20) - s = append(s, "&cortexpbv2.Histogram{") - if this.Count != nil { - s = append(s, "Count: "+fmt.Sprintf("%#v", this.Count)+",\n") - } - s = append(s, "Sum: "+fmt.Sprintf("%#v", this.Sum)+",\n") - s = append(s, "Schema: "+fmt.Sprintf("%#v", this.Schema)+",\n") - s = append(s, "ZeroThreshold: "+fmt.Sprintf("%#v", this.ZeroThreshold)+",\n") - if this.ZeroCount != nil { - s = append(s, "ZeroCount: "+fmt.Sprintf("%#v", this.ZeroCount)+",\n") - } - if this.NegativeSpans != nil { - vs := make([]*BucketSpan, len(this.NegativeSpans)) - for i := range vs { - vs[i] = &this.NegativeSpans[i] - } - s = append(s, "NegativeSpans: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "NegativeDeltas: "+fmt.Sprintf("%#v", this.NegativeDeltas)+",\n") - s = append(s, "NegativeCounts: "+fmt.Sprintf("%#v", this.NegativeCounts)+",\n") - if this.PositiveSpans != nil { - vs := make([]*BucketSpan, len(this.PositiveSpans)) - for i := range vs { - vs[i] = &this.PositiveSpans[i] - } - s = append(s, "PositiveSpans: "+fmt.Sprintf("%#v", vs)+",\n") - } - s = append(s, "PositiveDeltas: "+fmt.Sprintf("%#v", this.PositiveDeltas)+",\n") - s = append(s, "PositiveCounts: "+fmt.Sprintf("%#v", this.PositiveCounts)+",\n") - s = append(s, "ResetHint: "+fmt.Sprintf("%#v", this.ResetHint)+",\n") - s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") - s = append(s, "CustomValues: "+fmt.Sprintf("%#v", this.CustomValues)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Histogram_CountInt) GoString() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&cortexpbv2.Histogram_CountInt{` + - `CountInt:` + fmt.Sprintf("%#v", this.CountInt) + `}`}, ", ") - return s -} -func (this *Histogram_CountFloat) GoString() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&cortexpbv2.Histogram_CountFloat{` + - `CountFloat:` + fmt.Sprintf("%#v", this.CountFloat) + `}`}, ", ") - return s -} -func (this *Histogram_ZeroCountInt) GoString() string 
{ - if this == nil { - return "nil" - } - s := strings.Join([]string{`&cortexpbv2.Histogram_ZeroCountInt{` + - `ZeroCountInt:` + fmt.Sprintf("%#v", this.ZeroCountInt) + `}`}, ", ") - return s -} -func (this *Histogram_ZeroCountFloat) GoString() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&cortexpbv2.Histogram_ZeroCountFloat{` + - `ZeroCountFloat:` + fmt.Sprintf("%#v", this.ZeroCountFloat) + `}`}, ", ") - return s -} -func (this *BucketSpan) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&cortexpbv2.BucketSpan{") - s = append(s, "Offset: "+fmt.Sprintf("%#v", this.Offset)+",\n") - s = append(s, "Length: "+fmt.Sprintf("%#v", this.Length)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringCortexv2(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func (m *WriteRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.SkipLabelNameValidation { - i-- - if m.SkipLabelNameValidation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x3e - i-- - dAtA[i] = 0xc0 - } - if len(m.Timeseries) > 0 { - for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.Timeseries[iNdEx].Size() - i -= size - if _, err := m.Timeseries[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintCortexv2(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if len(m.Symbols) > 0 { - for iNdEx := len(m.Symbols) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Symbols[iNdEx]) - copy(dAtA[i:], m.Symbols[iNdEx]) - i = encodeVarintCortexv2(dAtA, i, uint64(len(m.Symbols[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if m.Source != 0 { - i = encodeVarintCortexv2(dAtA, i, uint64(m.Source)) - i-- - dAtA[i] = 0x18 - } - return len(dAtA) - i, nil -} - -func (m *WriteResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WriteResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WriteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Exemplars != 0 { - i = encodeVarintCortexv2(dAtA, i, uint64(m.Exemplars)) - i-- - dAtA[i] = 0x18 - } - if m.Histograms != 0 { - i = encodeVarintCortexv2(dAtA, i, uint64(m.Histograms)) - i-- - dAtA[i] = 0x10 - } - if m.Samples != 0 { - i = encodeVarintCortexv2(dAtA, i, uint64(m.Samples)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *TimeSeries) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} 
- -func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.CreatedTimestamp != 0 { - i = encodeVarintCortexv2(dAtA, i, uint64(m.CreatedTimestamp)) - i-- - dAtA[i] = 0x30 - } - { - size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCortexv2(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - if len(m.Exemplars) > 0 { - for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCortexv2(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.Histograms) > 0 { - for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCortexv2(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Samples) > 0 { - for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCortexv2(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.LabelsRefs) > 0 { - dAtA3 := make([]byte, len(m.LabelsRefs)*10) - var j2 int - for _, num := range m.LabelsRefs { - for num >= 1<<7 { - dAtA3[j2] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j2++ - } - dAtA3[j2] = uint8(num) - j2++ - } - i -= j2 - copy(dAtA[i:], dAtA3[:j2]) - i = encodeVarintCortexv2(dAtA, i, uint64(j2)) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Exemplar) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Timestamp != 0 { - i = encodeVarintCortexv2(dAtA, i, uint64(m.Timestamp)) - i-- - dAtA[i] = 0x18 - } - if m.Value != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) - i-- - dAtA[i] = 0x11 - } - if len(m.LabelsRefs) > 0 { - dAtA5 := make([]byte, len(m.LabelsRefs)*10) - var j4 int - for _, num := range m.LabelsRefs { - for num >= 1<<7 { - dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j4++ - } - dAtA5[j4] = uint8(num) - j4++ - } - i -= j4 - copy(dAtA[i:], dAtA5[:j4]) - i = encodeVarintCortexv2(dAtA, i, uint64(j4)) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Sample) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Sample) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Timestamp != 0 { - i = encodeVarintCortexv2(dAtA, i, uint64(m.Timestamp)) - i-- - dAtA[i] = 0x10 - } - if m.Value != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) - i-- - dAtA[i] = 0x9 - } - return len(dAtA) - i, nil -} - 
-func (m *Metadata) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Metadata) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.UnitRef != 0 { - i = encodeVarintCortexv2(dAtA, i, uint64(m.UnitRef)) - i-- - dAtA[i] = 0x20 - } - if m.HelpRef != 0 { - i = encodeVarintCortexv2(dAtA, i, uint64(m.HelpRef)) - i-- - dAtA[i] = 0x18 - } - if m.Type != 0 { - i = encodeVarintCortexv2(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Histogram) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Histogram) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.CustomValues) > 0 { - for iNdEx := len(m.CustomValues) - 1; iNdEx >= 0; iNdEx-- { - f6 := math.Float64bits(float64(m.CustomValues[iNdEx])) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f6)) - } - i = encodeVarintCortexv2(dAtA, i, uint64(len(m.CustomValues)*8)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x82 - } - if m.Timestamp != 0 { - i = encodeVarintCortexv2(dAtA, i, uint64(m.Timestamp)) - i-- - dAtA[i] = 0x78 - } - if m.ResetHint != 0 { - i = encodeVarintCortexv2(dAtA, i, uint64(m.ResetHint)) - i-- - dAtA[i] = 0x70 - } - if len(m.PositiveCounts) > 0 { - for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- { - f7 := math.Float64bits(float64(m.PositiveCounts[iNdEx])) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f7)) - } - i = encodeVarintCortexv2(dAtA, i, uint64(len(m.PositiveCounts)*8)) - i-- - dAtA[i] = 0x6a - } - if len(m.PositiveDeltas) > 0 { - var j8 int - dAtA10 := make([]byte, len(m.PositiveDeltas)*10) - for _, num := range m.PositiveDeltas { - x9 := (uint64(num) << 1) ^ uint64((num >> 63)) - for x9 >= 1<<7 { - dAtA10[j8] = uint8(uint64(x9)&0x7f | 0x80) - j8++ - x9 >>= 7 - } - dAtA10[j8] = uint8(x9) - j8++ - } - i -= j8 - copy(dAtA[i:], dAtA10[:j8]) - i = encodeVarintCortexv2(dAtA, i, uint64(j8)) - i-- - dAtA[i] = 0x62 - } - if len(m.PositiveSpans) > 0 { - for iNdEx := len(m.PositiveSpans) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.PositiveSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCortexv2(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - } - } - if len(m.NegativeCounts) > 0 { - for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- { - f11 := math.Float64bits(float64(m.NegativeCounts[iNdEx])) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f11)) - } - i = encodeVarintCortexv2(dAtA, i, uint64(len(m.NegativeCounts)*8)) - i-- - dAtA[i] = 0x52 - } - if len(m.NegativeDeltas) > 0 { - var j12 int - dAtA14 := make([]byte, len(m.NegativeDeltas)*10) - for _, num := range m.NegativeDeltas { - x13 := (uint64(num) << 1) ^ uint64((num >> 63)) - for x13 >= 1<<7 { - dAtA14[j12] = uint8(uint64(x13)&0x7f | 0x80) - j12++ - x13 >>= 7 - } - dAtA14[j12] = uint8(x13) - j12++ - } - i -= j12 - copy(dAtA[i:], 
dAtA14[:j12]) - i = encodeVarintCortexv2(dAtA, i, uint64(j12)) - i-- - dAtA[i] = 0x4a - } - if len(m.NegativeSpans) > 0 { - for iNdEx := len(m.NegativeSpans) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.NegativeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCortexv2(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - } - if m.ZeroCount != nil { - { - size := m.ZeroCount.Size() - i -= size - if _, err := m.ZeroCount.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - if m.ZeroThreshold != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold)))) - i-- - dAtA[i] = 0x29 - } - if m.Schema != 0 { - i = encodeVarintCortexv2(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31)))) - i-- - dAtA[i] = 0x20 - } - if m.Sum != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) - i-- - dAtA[i] = 0x19 - } - if m.Count != nil { - { - size := m.Count.Size() - i -= size - if _, err := m.Count.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *Histogram_CountInt) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *Histogram_CountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintCortexv2(dAtA, i, uint64(m.CountInt)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} -func (m *Histogram_CountFloat) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *Histogram_CountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CountFloat)))) - i-- - dAtA[i] = 0x11 - return len(dAtA) - i, nil -} -func (m *Histogram_ZeroCountInt) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *Histogram_ZeroCountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i = encodeVarintCortexv2(dAtA, i, uint64(m.ZeroCountInt)) - i-- - dAtA[i] = 0x30 - return len(dAtA) - i, nil -} -func (m *Histogram_ZeroCountFloat) MarshalTo(dAtA []byte) (int, error) { - return m.MarshalToSizedBuffer(dAtA[:m.Size()]) -} - -func (m *Histogram_ZeroCountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat)))) - i-- - dAtA[i] = 0x39 - return len(dAtA) - i, nil -} -func (m *BucketSpan) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BucketSpan) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BucketSpan) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Length != 0 { - i = encodeVarintCortexv2(dAtA, i, uint64(m.Length)) - i-- - dAtA[i] = 0x10 - } - if m.Offset != 0 { - i = encodeVarintCortexv2(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31)))) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintCortexv2(dAtA []byte, offset int, v uint64) int { - offset -= sovCortexv2(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - 
offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *WriteRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Source != 0 { - n += 1 + sovCortexv2(uint64(m.Source)) - } - if len(m.Symbols) > 0 { - for _, s := range m.Symbols { - l = len(s) - n += 1 + l + sovCortexv2(uint64(l)) - } - } - if len(m.Timeseries) > 0 { - for _, e := range m.Timeseries { - l = e.Size() - n += 1 + l + sovCortexv2(uint64(l)) - } - } - if m.SkipLabelNameValidation { - n += 3 - } - return n -} - -func (m *WriteResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Samples != 0 { - n += 1 + sovCortexv2(uint64(m.Samples)) - } - if m.Histograms != 0 { - n += 1 + sovCortexv2(uint64(m.Histograms)) - } - if m.Exemplars != 0 { - n += 1 + sovCortexv2(uint64(m.Exemplars)) - } - return n -} - -func (m *TimeSeries) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.LabelsRefs) > 0 { - l = 0 - for _, e := range m.LabelsRefs { - l += sovCortexv2(uint64(e)) - } - n += 1 + sovCortexv2(uint64(l)) + l - } - if len(m.Samples) > 0 { - for _, e := range m.Samples { - l = e.Size() - n += 1 + l + sovCortexv2(uint64(l)) - } - } - if len(m.Histograms) > 0 { - for _, e := range m.Histograms { - l = e.Size() - n += 1 + l + sovCortexv2(uint64(l)) - } - } - if len(m.Exemplars) > 0 { - for _, e := range m.Exemplars { - l = e.Size() - n += 1 + l + sovCortexv2(uint64(l)) - } - } - l = m.Metadata.Size() - n += 1 + l + sovCortexv2(uint64(l)) - if m.CreatedTimestamp != 0 { - n += 1 + sovCortexv2(uint64(m.CreatedTimestamp)) - } - return n -} - -func (m *Exemplar) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.LabelsRefs) > 0 { - l = 0 - for _, e := range m.LabelsRefs { - l += sovCortexv2(uint64(e)) - } - n += 1 + sovCortexv2(uint64(l)) + l - } - if m.Value != 0 { - n += 9 - } - if m.Timestamp != 0 { - n += 1 + sovCortexv2(uint64(m.Timestamp)) - } - return n -} - -func (m *Sample) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Value != 0 { - n += 9 - } - if m.Timestamp != 0 { - n += 1 + sovCortexv2(uint64(m.Timestamp)) - } - return n -} - -func (m *Metadata) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Type != 0 { - n += 1 + sovCortexv2(uint64(m.Type)) - } - if m.HelpRef != 0 { - n += 1 + sovCortexv2(uint64(m.HelpRef)) - } - if m.UnitRef != 0 { - n += 1 + sovCortexv2(uint64(m.UnitRef)) - } - return n -} - -func (m *Histogram) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Count != nil { - n += m.Count.Size() - } - if m.Sum != 0 { - n += 9 - } - if m.Schema != 0 { - n += 1 + sozCortexv2(uint64(m.Schema)) - } - if m.ZeroThreshold != 0 { - n += 9 - } - if m.ZeroCount != nil { - n += m.ZeroCount.Size() - } - if len(m.NegativeSpans) > 0 { - for _, e := range m.NegativeSpans { - l = e.Size() - n += 1 + l + sovCortexv2(uint64(l)) - } - } - if len(m.NegativeDeltas) > 0 { - l = 0 - for _, e := range m.NegativeDeltas { - l += sozCortexv2(uint64(e)) - } - n += 1 + sovCortexv2(uint64(l)) + l - } - if len(m.NegativeCounts) > 0 { - n += 1 + sovCortexv2(uint64(len(m.NegativeCounts)*8)) + len(m.NegativeCounts)*8 - } - if len(m.PositiveSpans) > 0 { - for _, e := range m.PositiveSpans { - l = e.Size() - n += 1 + l + sovCortexv2(uint64(l)) - } - } - if len(m.PositiveDeltas) > 0 { - l = 0 - for _, e := range m.PositiveDeltas { - l += sozCortexv2(uint64(e)) - } - n += 1 + sovCortexv2(uint64(l)) + l - } - if len(m.PositiveCounts) > 0 { - n += 1 + 
sovCortexv2(uint64(len(m.PositiveCounts)*8)) + len(m.PositiveCounts)*8 - } - if m.ResetHint != 0 { - n += 1 + sovCortexv2(uint64(m.ResetHint)) - } - if m.Timestamp != 0 { - n += 1 + sovCortexv2(uint64(m.Timestamp)) - } - if len(m.CustomValues) > 0 { - n += 2 + sovCortexv2(uint64(len(m.CustomValues)*8)) + len(m.CustomValues)*8 - } - return n -} - -func (m *Histogram_CountInt) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovCortexv2(uint64(m.CountInt)) - return n -} -func (m *Histogram_CountFloat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 9 - return n -} -func (m *Histogram_ZeroCountInt) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovCortexv2(uint64(m.ZeroCountInt)) - return n -} -func (m *Histogram_ZeroCountFloat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 9 - return n -} -func (m *BucketSpan) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Offset != 0 { - n += 1 + sozCortexv2(uint64(m.Offset)) - } - if m.Length != 0 { - n += 1 + sovCortexv2(uint64(m.Length)) - } - return n -} - -func sovCortexv2(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozCortexv2(x uint64) (n int) { - return sovCortexv2(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *WriteRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WriteRequest{`, - `Source:` + fmt.Sprintf("%v", this.Source) + `,`, - `Symbols:` + fmt.Sprintf("%v", this.Symbols) + `,`, - `Timeseries:` + fmt.Sprintf("%v", this.Timeseries) + `,`, - `SkipLabelNameValidation:` + fmt.Sprintf("%v", this.SkipLabelNameValidation) + `,`, - `}`, - }, "") - return s -} -func (this *WriteResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WriteResponse{`, - `Samples:` + fmt.Sprintf("%v", this.Samples) + `,`, - `Histograms:` + fmt.Sprintf("%v", this.Histograms) + `,`, - `Exemplars:` + fmt.Sprintf("%v", this.Exemplars) + `,`, - `}`, - }, "") - return s -} -func (this *TimeSeries) String() string { - if this == nil { - return "nil" - } - repeatedStringForSamples := "[]Sample{" - for _, f := range this.Samples { - repeatedStringForSamples += strings.Replace(strings.Replace(f.String(), "Sample", "Sample", 1), `&`, ``, 1) + "," - } - repeatedStringForSamples += "}" - repeatedStringForHistograms := "[]Histogram{" - for _, f := range this.Histograms { - repeatedStringForHistograms += strings.Replace(strings.Replace(f.String(), "Histogram", "Histogram", 1), `&`, ``, 1) + "," - } - repeatedStringForHistograms += "}" - repeatedStringForExemplars := "[]Exemplar{" - for _, f := range this.Exemplars { - repeatedStringForExemplars += strings.Replace(strings.Replace(f.String(), "Exemplar", "Exemplar", 1), `&`, ``, 1) + "," - } - repeatedStringForExemplars += "}" - s := strings.Join([]string{`&TimeSeries{`, - `LabelsRefs:` + fmt.Sprintf("%v", this.LabelsRefs) + `,`, - `Samples:` + repeatedStringForSamples + `,`, - `Histograms:` + repeatedStringForHistograms + `,`, - `Exemplars:` + repeatedStringForExemplars + `,`, - `Metadata:` + strings.Replace(strings.Replace(this.Metadata.String(), "Metadata", "Metadata", 1), `&`, ``, 1) + `,`, - `CreatedTimestamp:` + fmt.Sprintf("%v", this.CreatedTimestamp) + `,`, - `}`, - }, "") - return s -} -func (this *Exemplar) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Exemplar{`, - `LabelsRefs:` + fmt.Sprintf("%v", this.LabelsRefs) + 
`,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, - `}`, - }, "") - return s -} -func (this *Sample) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Sample{`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, - `}`, - }, "") - return s -} -func (this *Metadata) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Metadata{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `HelpRef:` + fmt.Sprintf("%v", this.HelpRef) + `,`, - `UnitRef:` + fmt.Sprintf("%v", this.UnitRef) + `,`, - `}`, - }, "") - return s -} -func (this *Histogram) String() string { - if this == nil { - return "nil" - } - repeatedStringForNegativeSpans := "[]BucketSpan{" - for _, f := range this.NegativeSpans { - repeatedStringForNegativeSpans += strings.Replace(strings.Replace(f.String(), "BucketSpan", "BucketSpan", 1), `&`, ``, 1) + "," - } - repeatedStringForNegativeSpans += "}" - repeatedStringForPositiveSpans := "[]BucketSpan{" - for _, f := range this.PositiveSpans { - repeatedStringForPositiveSpans += strings.Replace(strings.Replace(f.String(), "BucketSpan", "BucketSpan", 1), `&`, ``, 1) + "," - } - repeatedStringForPositiveSpans += "}" - s := strings.Join([]string{`&Histogram{`, - `Count:` + fmt.Sprintf("%v", this.Count) + `,`, - `Sum:` + fmt.Sprintf("%v", this.Sum) + `,`, - `Schema:` + fmt.Sprintf("%v", this.Schema) + `,`, - `ZeroThreshold:` + fmt.Sprintf("%v", this.ZeroThreshold) + `,`, - `ZeroCount:` + fmt.Sprintf("%v", this.ZeroCount) + `,`, - `NegativeSpans:` + repeatedStringForNegativeSpans + `,`, - `NegativeDeltas:` + fmt.Sprintf("%v", this.NegativeDeltas) + `,`, - `NegativeCounts:` + fmt.Sprintf("%v", this.NegativeCounts) + `,`, - `PositiveSpans:` + repeatedStringForPositiveSpans + `,`, - `PositiveDeltas:` + fmt.Sprintf("%v", this.PositiveDeltas) + `,`, - `PositiveCounts:` + fmt.Sprintf("%v", this.PositiveCounts) + `,`, - `ResetHint:` + fmt.Sprintf("%v", this.ResetHint) + `,`, - `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, - `CustomValues:` + fmt.Sprintf("%v", this.CustomValues) + `,`, - `}`, - }, "") - return s -} -func (this *Histogram_CountInt) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Histogram_CountInt{`, - `CountInt:` + fmt.Sprintf("%v", this.CountInt) + `,`, - `}`, - }, "") - return s -} -func (this *Histogram_CountFloat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Histogram_CountFloat{`, - `CountFloat:` + fmt.Sprintf("%v", this.CountFloat) + `,`, - `}`, - }, "") - return s -} -func (this *Histogram_ZeroCountInt) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Histogram_ZeroCountInt{`, - `ZeroCountInt:` + fmt.Sprintf("%v", this.ZeroCountInt) + `,`, - `}`, - }, "") - return s -} -func (this *Histogram_ZeroCountFloat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Histogram_ZeroCountFloat{`, - `ZeroCountFloat:` + fmt.Sprintf("%v", this.ZeroCountFloat) + `,`, - `}`, - }, "") - return s -} -func (this *BucketSpan) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&BucketSpan{`, - `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, - `Length:` + fmt.Sprintf("%v", this.Length) + `,`, - `}`, - }, "") - return s -} -func valueToStringCortexv2(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { 
- return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *WriteRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WriteRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WriteRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) - } - m.Source = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Source |= WriteRequest_SourceEnum(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Symbols", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCortexv2 - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCortexv2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Symbols = append(m.Symbols, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortexv2 - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortexv2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Timeseries = append(m.Timeseries, PreallocTimeseriesV2{}) - if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 1000: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SkipLabelNameValidation", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SkipLabelNameValidation = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipCortexv2(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortexv2 - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortexv2 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WriteResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex 
:= iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WriteResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WriteResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) - } - m.Samples = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Samples |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) - } - m.Histograms = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Histograms |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) - } - m.Exemplars = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Exemplars |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipCortexv2(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortexv2 - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortexv2 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TimeSeries) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType == 0 { - var v uint32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.LabelsRefs = append(m.LabelsRefs, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthCortexv2 - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthCortexv2 - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.LabelsRefs) == 0 { - m.LabelsRefs = make([]uint32, 0, elementCount) - } - for iNdEx < postIndex { - var v uint32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.LabelsRefs = append(m.LabelsRefs, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType) - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortexv2 - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortexv2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Samples = append(m.Samples, Sample{}) - if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortexv2 - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortexv2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Histograms = append(m.Histograms, Histogram{}) - if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortexv2 - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortexv2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Exemplars = append(m.Exemplars, Exemplar{}) - if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortexv2 - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortexv2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := 
m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType) - } - m.CreatedTimestamp = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreatedTimestamp |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipCortexv2(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortexv2 - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortexv2 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Exemplar) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Exemplar: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType == 0 { - var v uint32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.LabelsRefs = append(m.LabelsRefs, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthCortexv2 - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthCortexv2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.LabelsRefs) == 0 { - m.LabelsRefs = make([]uint32, 0, elementCount) - } - for iNdEx < postIndex { - var v uint32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.LabelsRefs = append(m.LabelsRefs, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType) - } - case 2: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Value = float64(math.Float64frombits(v)) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - m.Timestamp = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Timestamp |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipCortexv2(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortexv2 - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortexv2 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Sample) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Sample: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Value = float64(math.Float64frombits(v)) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - m.Timestamp = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Timestamp |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipCortexv2(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortexv2 - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortexv2 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Metadata) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metadata: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= Metadata_MetricType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HelpRef", wireType) - } - m.HelpRef = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HelpRef |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UnitRef", wireType) - } - m.UnitRef = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UnitRef |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipCortexv2(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortexv2 - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortexv2 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Histogram) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Histogram: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CountInt", wireType) - } - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Count = &Histogram_CountInt{v} - case 2: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field CountFloat", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Count = &Histogram_CountFloat{float64(math.Float64frombits(v))} - case 3: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Sum = float64(math.Float64frombits(v)) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) - m.Schema = v - case 5: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.ZeroThreshold = float64(math.Float64frombits(v)) - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountInt", wireType) - } - var v uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ZeroCount = &Histogram_ZeroCountInt{v} - case 7: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountFloat", wireType) - } - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.ZeroCount = &Histogram_ZeroCountFloat{float64(math.Float64frombits(v))} - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NegativeSpans", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortexv2 - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortexv2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NegativeSpans = append(m.NegativeSpans, BucketSpan{}) - if err := m.NegativeSpans[len(m.NegativeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) - m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthCortexv2 - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthCortexv2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.NegativeDeltas) == 0 { - m.NegativeDeltas = make([]int64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) - m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field NegativeDeltas", wireType) - } - case 10: - if wireType == 1 { - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - v2 := float64(math.Float64frombits(v)) - m.NegativeCounts = append(m.NegativeCounts, v2) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
if packedLen < 0 { - return ErrInvalidLengthCortexv2 - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthCortexv2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - elementCount = packedLen / 8 - if elementCount != 0 && len(m.NegativeCounts) == 0 { - m.NegativeCounts = make([]float64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - v2 := float64(math.Float64frombits(v)) - m.NegativeCounts = append(m.NegativeCounts, v2) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field NegativeCounts", wireType) - } - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PositiveSpans", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCortexv2 - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCortexv2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PositiveSpans = append(m.PositiveSpans, BucketSpan{}) - if err := m.PositiveSpans[len(m.PositiveSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) - m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthCortexv2 - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthCortexv2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.PositiveDeltas) == 0 { - m.PositiveDeltas = make([]int64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) - m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field PositiveDeltas", wireType) - } - case 13: - if wireType == 1 { - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - v2 := float64(math.Float64frombits(v)) - m.PositiveCounts = append(m.PositiveCounts, v2) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthCortexv2 - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthCortexv2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - elementCount = packedLen / 8 - if elementCount != 0 && len(m.PositiveCounts) == 0 { - m.PositiveCounts = make([]float64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - v2 := float64(math.Float64frombits(v)) - m.PositiveCounts = append(m.PositiveCounts, v2) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field PositiveCounts", wireType) - } - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ResetHint", wireType) - } - m.ResetHint = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ResetHint |= Histogram_ResetHint(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - m.Timestamp = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Timestamp |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 16: - if wireType == 1 { - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - v2 := float64(math.Float64frombits(v)) - m.CustomValues = append(m.CustomValues, v2) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthCortexv2 - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthCortexv2 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - elementCount = packedLen / 8 - if elementCount != 0 && len(m.CustomValues) == 0 { - m.CustomValues = make([]float64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - v2 := float64(math.Float64frombits(v)) - m.CustomValues = append(m.CustomValues, v2) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field CustomValues", wireType) - } - default: - iNdEx = preIndex - skippy, err := skipCortexv2(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortexv2 - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortexv2 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BucketSpan) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BucketSpan: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BucketSpan: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) - m.Offset = v - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) - } - m.Length = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Length |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipCortexv2(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCortexv2 - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCortexv2 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipCortexv2(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthCortexv2 - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthCortexv2 - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCortexv2 - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipCortexv2(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthCortexv2 - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - 
-var ( - ErrInvalidLengthCortexv2 = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowCortexv2 = fmt.Errorf("proto: integer overflow") -) diff --git a/pkg/cortexpbv2/cortexv2.proto b/pkg/cortexpbv2/cortexv2.proto deleted file mode 100644 index c56038b9f0..0000000000 --- a/pkg/cortexpbv2/cortexv2.proto +++ /dev/null @@ -1,227 +0,0 @@ -syntax = "proto3"; - -package cortexpbv2; - -option go_package = "cortexpbv2"; - -import "gogoproto/gogo.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -// https://github.com/prometheus/prometheus/blob/main/prompb/io/prometheus/write/v2/types.proto -message WriteRequest { - reserved 1 to 2; - enum SourceEnum { - API = 0; - RULE = 1; - } - SourceEnum Source = 3; - repeated string symbols = 4; - repeated TimeSeries timeseries = 5 [(gogoproto.nullable) = false, (gogoproto.customtype) = "PreallocTimeseriesV2"]; - - bool skip_label_name_validation = 1000; // set intentionally high to keep WriteRequest compatible with upstream Prometheus -} - -message WriteResponse { - // Samples represents X-Prometheus-Remote-Write-Written-Samples - int64 Samples = 1; - // Histograms represents X-Prometheus-Remote-Write-Written-Histograms - int64 Histograms = 2; - // Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars - int64 Exemplars = 3; -} - -message TimeSeries { - repeated uint32 labels_refs = 1; - // Timeseries messages can either specify samples or (native) histogram samples - // (histogram field), but not both. For a typical sender (real-time metric - // streaming), in healthy cases, there will be only one sample or histogram. - // - // Samples and histograms are sorted by timestamp (older first). - repeated Sample samples = 2 [(gogoproto.nullable) = false]; - repeated Histogram histograms = 3 [(gogoproto.nullable) = false]; - - // exemplars represents an optional set of exemplars attached to this series' samples. - repeated Exemplar exemplars = 4 [(gogoproto.nullable) = false]; - - // metadata represents the metadata associated with the given series' samples. - Metadata metadata = 5 [(gogoproto.nullable) = false]; - - // created_timestamp represents an optional created timestamp associated with - // this series' samples in ms format, typically for counter or histogram type - // metrics. Created timestamp represents the time when the counter started - // counting (sometimes referred to as start timestamp), which can increase - // the accuracy of query results. - // - // Note that some receivers might require this and in return fail to - // ingest such samples within the Request. - // - // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go - // for conversion from/to time.Time to Prometheus timestamp. - // - // Note that the "optional" keyword is omitted due to - // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields - // Zero value means value not set. If you need to use exactly zero value for - // the timestamp, use 1 millisecond before or after. - int64 created_timestamp = 6; -} - -// Exemplar is an additional information attached to some series' samples. -// It is typically used to attach an example trace or request ID associated with -// the metric changes. -message Exemplar { - // labels_refs is an optional list of label name-value pair references, encoded - // as indices to the Request.symbols array. This list's len is always - // a multiple of 2, and the underlying labels should be sorted lexicographically. 
- // If the exemplar references a trace it should use the `trace_id` label name, as a best practice. - repeated uint32 labels_refs = 1; - // value represents an exact example value. This can be useful when the exemplar - // is attached to a histogram, which only gives an estimated value through buckets. - double value = 2; - // timestamp represents the timestamp of the exemplar in ms. - // - // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go - // for conversion from/to time.Time to Prometheus timestamp. - int64 timestamp = 3; -} - -// Sample represents series sample. -message Sample { - // value of the sample. - double value = 1; - // timestamp represents timestamp of the sample in ms. - // - // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go - // for conversion from/to time.Time to Prometheus timestamp. - int64 timestamp = 2; -} - -// Metadata represents the metadata associated with the given series' samples. -message Metadata { - enum MetricType { - METRIC_TYPE_UNSPECIFIED = 0; - METRIC_TYPE_COUNTER = 1; - METRIC_TYPE_GAUGE = 2; - METRIC_TYPE_HISTOGRAM = 3; - METRIC_TYPE_GAUGEHISTOGRAM = 4; - METRIC_TYPE_SUMMARY = 5; - METRIC_TYPE_INFO = 6; - METRIC_TYPE_STATESET = 7; - } - MetricType type = 1; - // help_ref is a reference to the Request.symbols array representing help - // text for the metric. Help is optional, reference should point to an empty string in - // such a case. - uint32 help_ref = 3; - // unit_ref is a reference to the Request.symbols array representing a unit - // for the metric. Unit is optional, reference should point to an empty string in - // such a case. - uint32 unit_ref = 4; -} - -// A native histogram, also known as a sparse histogram. -// Original design doc: -// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit -// The appendix of this design doc also explains the concept of float -// histograms. This Histogram message can represent both, the usual -// integer histogram as well as a float histogram. -message Histogram { - enum ResetHint { - RESET_HINT_UNSPECIFIED = 0; // Need to test for a counter reset explicitly. - RESET_HINT_YES = 1; // This is the 1st histogram after a counter reset. - RESET_HINT_NO = 2; // There was no counter reset between this and the previous Histogram. - RESET_HINT_GAUGE = 3; // This is a gauge histogram where counter resets don't happen. - } - - oneof count { // Count of observations in the histogram. - uint64 count_int = 1; - double count_float = 2; - } - double sum = 3; // Sum of observations in the histogram. - - // The schema defines the bucket schema. Currently, valid numbers - // are -53 and numbers in range of -4 <= n <= 8. More valid numbers might be - // added in future for new bucketing layouts. - // - // The schema equal to -53 means custom buckets. See - // custom_values field description for more details. - // - // Values between -4 and 8 represent base-2 bucket schema, where 1 - // is a bucket boundary in each case, and then each power of two is - // divided into 2^n (n is schema value) logarithmic buckets. Or in other words, - // each bucket boundary is the previous boundary times 2^(2^-n). - sint32 schema = 4; - double zero_threshold = 5; // Breadth of the zero bucket. - oneof zero_count { // Count in zero bucket. - uint64 zero_count_int = 6; - double zero_count_float = 7; - } - - // Negative Buckets. 
- repeated BucketSpan negative_spans = 8 [(gogoproto.nullable) = false]; - // Use either "negative_deltas" or "negative_counts", the former for - // regular histograms with integer counts, the latter for - // float histograms. - repeated sint64 negative_deltas = 9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). - repeated double negative_counts = 10; // Absolute count of each bucket. - - // Positive Buckets. - // - // In case of custom buckets (-53 schema value) the positive buckets are interpreted as follows: - // * The span offset+length points to an the index of the custom_values array - // or +Inf if pointing to the len of the array. - // * The counts and deltas have the same meaning as for exponential histograms. - repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false]; - // Use either "positive_deltas" or "positive_counts", the former for - // regular histograms with integer counts, the latter for - // float histograms. - repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). - repeated double positive_counts = 13; // Absolute count of each bucket. - - ResetHint reset_hint = 14; - // timestamp represents timestamp of the sample in ms. - // - // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go - // for conversion from/to time.Time to Prometheus timestamp. - int64 timestamp = 15; - - // custom_values is an additional field used by non-exponential bucketing layouts. - // - // For custom buckets (-53 schema value) custom_values specify monotonically - // increasing upper inclusive boundaries for the bucket counts with arbitrary - // widths for this histogram. In other words, custom_values represents custom, - // explicit bucketing that could have been converted from the classic histograms. - // - // Those bounds are then referenced by spans in positive_spans with corresponding positive - // counts of deltas (refer to positive_spans for more details). This way we can - // have encode sparse histograms with custom bucketing (many buckets are often - // not used). - // - // Note that for custom bounds, even negative observations are placed in the positive - // counts to simplify the implementation and avoid ambiguity of where to place - // an underflow bucket, e.g. (-2, 1]. Therefore negative buckets and - // the zero bucket are unused, if the schema indicates custom bucketing. - // - // For each upper boundary the previous boundary represent the lower exclusive - // boundary for that bucket. The first element is the upper inclusive boundary - // for the first bucket, which implicitly has a lower inclusive bound of -Inf. - // This is similar to "le" label semantics on classic histograms. You may add a - // bucket with an upper bound of 0 to make sure that you really have no negative - // observations, but in practice, native histogram rendering will show both with - // or without first upper boundary 0 and no negative counts as the same case. - // - // The last element is not only the upper inclusive bound of the last regular - // bucket, but implicitly the lower exclusive bound of the +Inf bucket. - repeated double custom_values = 16; -} - -// A BucketSpan defines a number of consecutive buckets with their -// offset. Logically, it would be more straightforward to include the -// bucket counts in the Span. 
However, the protobuf representation is -// more compact in the way the data is structured here (with all the -// buckets in a single array separate from the Spans). -message BucketSpan { - sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative). - uint32 length = 2; // Length of consecutive buckets. -} diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 5cd0c13e88..75a3dfc423 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -29,7 +29,6 @@ import ( "go.uber.org/atomic" "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/cortexpbv2" "github.com/cortexproject/cortex/pkg/ha" "github.com/cortexproject/cortex/pkg/ingester" ingester_client "github.com/cortexproject/cortex/pkg/ingester/client" @@ -46,7 +45,7 @@ import ( ) var ( - emptyPreallocSeriesV2 = cortexpbv2.PreallocTimeseriesV2{} + emptyPreallocSeriesV2 = cortexpb.PreallocTimeseriesV2{} emptyPreallocSeries = cortexpb.PreallocTimeseries{} supportedShardingStrategies = []string{util.ShardingStrategyDefault, util.ShardingStrategyShuffle} @@ -631,12 +630,12 @@ func (d *Distributor) validateSeries(ts cortexpb.PreallocTimeseries, userID stri nil } -func (d *Distributor) prepareSeriesKeysV2(ctx context.Context, req *cortexpbv2.WriteRequest, userID string, limits *validation.Limits, b labels.ScratchBuilder, st *writev2.SymbolsTable, removeReplica bool) ([]uint32, []cortexpbv2.PreallocTimeseriesV2, int64, int64, int64, int64, error, error) { +func (d *Distributor) prepareSeriesKeysV2(ctx context.Context, req *cortexpb.WriteRequestV2, userID string, limits *validation.Limits, b labels.ScratchBuilder, st *writev2.SymbolsTable, removeReplica bool) ([]uint32, []cortexpb.PreallocTimeseriesV2, int64, int64, int64, int64, error, error) { pSpan, _ := opentracing.StartSpanFromContext(ctx, "prepareSeriesKeysV2") defer pSpan.Finish() // For each timeseries or samples, we compute a hash to distribute across ingesters; // check each sample/metadata and discard if outside limits. - validatedTimeseries := make([]cortexpbv2.PreallocTimeseriesV2, 0, len(req.Timeseries)) + validatedTimeseries := make([]cortexpb.PreallocTimeseriesV2, 0, len(req.Timeseries)) seriesKeys := make([]uint32, 0, len(req.Timeseries)) validatedFloatSamples := 0 validatedHistogramSamples := 0 @@ -659,10 +658,10 @@ func (d *Distributor) prepareSeriesKeysV2(ctx context.Context, req *cortexpbv2.W for _, ts := range req.Timeseries { // Use timestamp of latest sample in the series. If samples for series are not ordered, metric for user may be wrong. if len(ts.Samples) > 0 { - latestSampleTimestampMs = max(latestSampleTimestampMs, ts.Samples[len(ts.Samples)-1].Timestamp) + latestSampleTimestampMs = max(latestSampleTimestampMs, ts.Samples[len(ts.Samples)-1].TimestampMs) } if len(ts.Histograms) > 0 { - latestSampleTimestampMs = max(latestSampleTimestampMs, ts.Histograms[len(ts.Histograms)-1].Timestamp) + latestSampleTimestampMs = max(latestSampleTimestampMs, ts.Histograms[len(ts.Histograms)-1].TimestampMs) } lbs := ts.ToLabels(&b, req.Symbols) @@ -737,7 +736,7 @@ func (d *Distributor) prepareSeriesKeysV2(ctx context.Context, req *cortexpbv2.W firstPartialErr = httpgrpc.Errorf(http.StatusBadRequest, validationErr.Error()) } - if ts.Metadata.Type != cortexpbv2.METRIC_TYPE_UNSPECIFIED { + if ts.Metadata.Type != cortexpb.UNKNOWN { // since metadata is attached, count only metadata that is not METRIC_TYPE_UNSPECIFIED. 
validatedMetadata++ } @@ -757,7 +756,7 @@ func (d *Distributor) prepareSeriesKeysV2(ctx context.Context, req *cortexpbv2.W return seriesKeys, validatedTimeseries, int64(validatedMetadata), int64(validatedFloatSamples), int64(validatedHistogramSamples), int64(validatedExemplars), firstPartialErr, nil } -func (d *Distributor) doBatchV2(ctx context.Context, req *cortexpbv2.WriteRequest, subRing ring.ReadRing, keys []uint32, validatedTimeseries []cortexpbv2.PreallocTimeseriesV2, userID string, stats *WriteStats) error { +func (d *Distributor) doBatchV2(ctx context.Context, req *cortexpb.WriteRequestV2, subRing ring.ReadRing, keys []uint32, validatedTimeseries []cortexpb.PreallocTimeseriesV2, userID string, stats *WriteStats) error { span, _ := opentracing.StartSpanFromContext(ctx, "doBatchV2") defer span.Finish() @@ -781,7 +780,7 @@ func (d *Distributor) doBatchV2(ctx context.Context, req *cortexpbv2.WriteReques } return ring.DoBatch(ctx, op, subRing, keys, func(ingester ring.InstanceDesc, indexes []int) error { - timeseries := make([]cortexpbv2.PreallocTimeseriesV2, 0, len(indexes)) + timeseries := make([]cortexpb.PreallocTimeseriesV2, 0, len(indexes)) for _, i := range indexes { timeseries = append(timeseries, validatedTimeseries[i]) @@ -789,12 +788,12 @@ func (d *Distributor) doBatchV2(ctx context.Context, req *cortexpbv2.WriteReques return d.sendV2(localCtx, req.Symbols, ingester, timeseries, req.Source, stats) }, func() { - cortexpbv2.ReuseSlice(req.Timeseries) + cortexpb.ReuseSliceV2(req.Timeseries) cancel() }) } -func (d *Distributor) sendV2(ctx context.Context, symbols []string, ingester ring.InstanceDesc, timeseries []cortexpbv2.PreallocTimeseriesV2, source cortexpbv2.WriteRequest_SourceEnum, stats *WriteStats) error { +func (d *Distributor) sendV2(ctx context.Context, symbols []string, ingester ring.InstanceDesc, timeseries []cortexpb.PreallocTimeseriesV2, source cortexpb.SourceEnum, stats *WriteStats) error { h, err := d.ingesterPool.GetClientFor(ingester.Addr) if err != nil { return err @@ -807,14 +806,24 @@ func (d *Distributor) sendV2(ctx context.Context, symbols []string, ingester rin c := h.(ingester_client.HealthAndIngesterClient) - req := cortexpbv2.PreallocWriteRequestV2FromPool() + req := cortexpb.PreallocWriteRequestV2FromPool() req.Symbols = symbols req.Timeseries = timeseries req.Source = source resp, err := c.PushPreAllocV2(ctx, req) if err == nil { - cortexpbv2.ReuseWriteRequestV2(req) + cortexpb.ReuseWriteRequestV2(req) + } + + if err != nil && strings.Contains(err.Error(), "unknown method PushV2") { + // To handle rolling update where distributor can handle PRW2.0 but Ingesters are not. 
+ // Convert V2 timeseries to V1 timeseries and metadata, then send a PRW1.0 request to the Ingester + v1Ts, v1Metadata, err := d.convertV2ToV1(symbols, timeseries) + if err != nil { + return err + } + return d.send(ctx, ingester, v1Ts, v1Metadata, source) } if len(timeseries) > 0 { @@ -825,7 +834,7 @@ func (d *Distributor) sendV2(ctx context.Context, symbols []string, ingester rin metadataAppend := false for _, ts := range timeseries { - if ts.Metadata.Type != cortexpbv2.METRIC_TYPE_UNSPECIFIED { + if ts.Metadata.Type != cortexpb.UNKNOWN { metadataAppend = true break } @@ -848,33 +857,70 @@ func (d *Distributor) sendV2(ctx context.Context, symbols []string, ingester rin return err } +func (d *Distributor) convertV2ToV1(symbols []string, timeseries []cortexpb.PreallocTimeseriesV2) ([]cortexpb.PreallocTimeseries, []*cortexpb.MetricMetadata, error) { + var v1Timeseries []cortexpb.PreallocTimeseries + var v1Metadata []*cortexpb.MetricMetadata + + b := labels.NewScratchBuilder(0) + for _, v2Ts := range timeseries { + las := cortexpb.FromLabelsToLabelAdapters(v2Ts.ToLabels(&b, symbols)) + v1Timeseries = append(v1Timeseries, cortexpb.PreallocTimeseries{ + TimeSeries: &cortexpb.TimeSeries{ + Labels: las, + Samples: v2Ts.Samples, + Exemplars: d.convertV1ToV2Exemplars(b, symbols, v2Ts.Exemplars), + Histograms: v2Ts.Histograms, + }, + }) + metricName, err := extract.MetricNameFromLabelAdapters(las) + if err != nil { + return nil, nil, err + } + v1Metadata = append(v1Metadata, v2Ts.Metadata.ToV1Metadata(metricName, symbols)) + } + + return v1Timeseries, v1Metadata, nil +} + +func (d *Distributor) convertV1ToV2Exemplars(b labels.ScratchBuilder, symbols []string, v2Exemplars []cortexpb.ExemplarV2) []cortexpb.Exemplar { + v1Exemplars := make([]cortexpb.Exemplar, 0, len(v2Exemplars)) + for _, e := range v2Exemplars { + v1Exemplars = append(v1Exemplars, cortexpb.Exemplar{ + Labels: cortexpb.FromLabelsToLabelAdapters(e.ToLabels(&b, symbols)), + Value: e.Value, + TimestampMs: e.Timestamp, + }) + } + return v1Exemplars +} + // Validates a single series from a write request. Will remove labels if // any are configured to be dropped for the user ID. // Returns the validated series with it's labels/samples, and any error. // The returned error may retain the series labels.
-func (d *Distributor) validateSeriesV2(ts cortexpbv2.PreallocTimeseriesV2, seriesLabels []cortexpb.LabelAdapter, symbols []string, userID string, skipLabelNameValidation bool, limits *validation.Limits, b labels.ScratchBuilder, st *writev2.SymbolsTable) (cortexpbv2.PreallocTimeseriesV2, validation.ValidationError) { +func (d *Distributor) validateSeriesV2(ts cortexpb.PreallocTimeseriesV2, seriesLabels []cortexpb.LabelAdapter, symbols []string, userID string, skipLabelNameValidation bool, limits *validation.Limits, b labels.ScratchBuilder, st *writev2.SymbolsTable) (cortexpb.PreallocTimeseriesV2, validation.ValidationError) { d.labelsHistogram.Observe(float64(len(ts.LabelsRefs))) if err := validation.ValidateLabels(d.validateMetrics, limits, userID, seriesLabels, skipLabelNameValidation); err != nil { return emptyPreallocSeriesV2, err } - var samples []cortexpbv2.Sample + var samples []cortexpb.Sample if len(ts.Samples) > 0 { // Only alloc when data present - samples = make([]cortexpbv2.Sample, 0, len(ts.Samples)) + samples = make([]cortexpb.Sample, 0, len(ts.Samples)) for _, s := range ts.Samples { - if err := validation.ValidateSampleTimestamp(d.validateMetrics, limits, userID, seriesLabels, s.Timestamp); err != nil { + if err := validation.ValidateSampleTimestamp(d.validateMetrics, limits, userID, seriesLabels, s.TimestampMs); err != nil { return emptyPreallocSeriesV2, err } samples = append(samples, s) } } - var exemplars []cortexpbv2.Exemplar + var exemplars []cortexpb.ExemplarV2 if len(ts.Exemplars) > 0 { // Only alloc when data present - exemplars = make([]cortexpbv2.Exemplar, 0, len(ts.Exemplars)) + exemplars = make([]cortexpb.ExemplarV2, 0, len(ts.Exemplars)) for _, e := range ts.Exemplars { if err := validation.ValidateExemplarV2(d.validateMetrics, symbols, userID, seriesLabels, &e, b, st); err != nil { // An exemplar validation error prevents ingesting samples @@ -887,15 +933,15 @@ func (d *Distributor) validateSeriesV2(ts cortexpbv2.PreallocTimeseriesV2, serie } } - var histograms []cortexpbv2.Histogram + var histograms []cortexpb.Histogram if len(ts.Histograms) > 0 { // Only alloc when data present - histograms = make([]cortexpbv2.Histogram, 0, len(ts.Histograms)) + histograms = make([]cortexpb.Histogram, 0, len(ts.Histograms)) for i, h := range ts.Histograms { - if err := validation.ValidateSampleTimestamp(d.validateMetrics, limits, userID, seriesLabels, h.Timestamp); err != nil { + if err := validation.ValidateSampleTimestamp(d.validateMetrics, limits, userID, seriesLabels, h.TimestampMs); err != nil { return emptyPreallocSeriesV2, err } - convertedHistogram, err := validation.ValidateNativeHistogramV2(d.validateMetrics, limits, userID, seriesLabels, h) + convertedHistogram, err := validation.ValidateNativeHistogram(d.validateMetrics, limits, userID, seriesLabels, h) if err != nil { return emptyPreallocSeriesV2, err } @@ -910,8 +956,8 @@ func (d *Distributor) validateSeriesV2(ts cortexpbv2.PreallocTimeseriesV2, serie return emptyPreallocSeriesV2, err } - return cortexpbv2.PreallocTimeseriesV2{ - TimeSeries: &cortexpbv2.TimeSeries{ + return cortexpb.PreallocTimeseriesV2{ + TimeSeriesV2: &cortexpb.TimeSeriesV2{ LabelsRefs: ts.LabelsRefs, Samples: samples, Exemplars: exemplars, @@ -921,7 +967,7 @@ func (d *Distributor) validateSeriesV2(ts cortexpbv2.PreallocTimeseriesV2, serie }, nil } -func (d *Distributor) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) { +func (d *Distributor) PushV2(ctx context.Context, req 
*cortexpb.WriteRequestV2) (*cortexpb.WriteResponseV2, error) { userID, err := tenant.TenantID(ctx) if err != nil { return nil, err @@ -973,7 +1019,7 @@ func (d *Distributor) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) removeReplica, err = d.checkSample(ctx, userID, cluster, replica, limits) if err != nil { // Ensure the request slice is reused if the series get deduped. - cortexpbv2.ReuseSlice(req.Timeseries) + cortexpb.ReuseSliceV2(req.Timeseries) if errors.Is(err, ha.ReplicasNotMatchError{}) { // These samples have been deduped. @@ -1009,16 +1055,16 @@ func (d *Distributor) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) if len(seriesKeys) == 0 { // Ensure the request slice is reused if there's no series or metadata passing the validation. - cortexpbv2.ReuseSlice(req.Timeseries) + cortexpb.ReuseSliceV2(req.Timeseries) - return &cortexpbv2.WriteResponse{}, firstPartialErr + return &cortexpb.WriteResponseV2{}, firstPartialErr } totalSamples := validatedFloatSamples + validatedHistogramSamples totalN := totalSamples + validatedExemplars + validatedMetadatas if !d.ingestionRateLimiter.AllowN(now, userID, int(totalN)) { // Ensure the request slice is reused if the request is rate limited. - cortexpbv2.ReuseSlice(req.Timeseries) + cortexpb.ReuseSliceV2(req.Timeseries) d.validateMetrics.DiscardedSamples.WithLabelValues(validation.RateLimited, userID).Add(float64(totalSamples)) d.validateMetrics.DiscardedExemplars.WithLabelValues(validation.RateLimited, userID).Add(float64(validatedExemplars)) @@ -1048,7 +1094,7 @@ func (d *Distributor) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) return nil, err } - resp := &cortexpbv2.WriteResponse{ + resp := &cortexpb.WriteResponseV2{ Samples: s.LoadSamples(), Histograms: s.LoadHistogram(), Exemplars: s.LoadExemplars(), @@ -1420,7 +1466,7 @@ func sortLabelsIfNeeded(labels []cortexpb.LabelAdapter) { }) } -func (d *Distributor) send(ctx context.Context, ingester ring.InstanceDesc, timeseries []cortexpb.PreallocTimeseries, metadata []*cortexpb.MetricMetadata, source cortexpb.WriteRequest_SourceEnum) error { +func (d *Distributor) send(ctx context.Context, ingester ring.InstanceDesc, timeseries []cortexpb.PreallocTimeseries, metadata []*cortexpb.MetricMetadata, source cortexpb.SourceEnum) error { h, err := d.ingesterPool.GetClientFor(ingester.Addr) if err != nil { return err diff --git a/pkg/distributor/distributor_prw2_test.go b/pkg/distributor/distributor_prw2_test.go index 80f8d92f70..77227125af 100644 --- a/pkg/distributor/distributor_prw2_test.go +++ b/pkg/distributor/distributor_prw2_test.go @@ -25,7 +25,6 @@ import ( "google.golang.org/grpc/status" "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/cortexpbv2" "github.com/cortexproject/cortex/pkg/ingester" "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/ring" @@ -43,7 +42,7 @@ import ( ) var ( - emptyResponseV2 = &cortexpbv2.WriteResponse{} + emptyResponseV2 = &cortexpb.WriteResponseV2{} ) func TestDistributorPRW2_Push_LabelRemoval_RemovingNameLabelWillError(t *testing.T) { @@ -97,7 +96,7 @@ func TestDistributorPRW2_Push_LabelRemoval(t *testing.T) { expectedSeries labels.Labels removeReplica bool removeLabels []string - exemplars []cortexpbv2.Exemplar + exemplars []cortexpb.ExemplarV2 } cases := []testcase{ @@ -153,7 +152,7 @@ func TestDistributorPRW2_Push_LabelRemoval(t *testing.T) { {Name: "__replica__", Value: "two"}, }, expectedSeries: labels.Labels{}, - exemplars: []cortexpbv2.Exemplar{ + 
exemplars: []cortexpb.ExemplarV2{ {LabelsRefs: []uint32{1, 2}, Value: 1, Timestamp: 0}, {LabelsRefs: []uint32{1, 2}, Value: 1, Timestamp: 0}, }, @@ -223,7 +222,7 @@ func TestDistributorPRW2_PushHAInstances(t *testing.T) { testReplica string cluster string samples int - expectedResponse *cortexpbv2.WriteResponse + expectedResponse *cortexpb.WriteResponseV2 expectedCode int32 }{ { @@ -315,14 +314,14 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) { tests := map[string]struct { prepareConfig func(limits *validation.Limits) - prepareSeries func() ([]labels.Labels, []cortexpbv2.Sample) + prepareSeries func() ([]labels.Labels, []cortexpb.Sample) expectedErr string }{ "all samples successfully pushed": { prepareConfig: func(limits *validation.Limits) {}, - prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) { + prepareSeries: func() ([]labels.Labels, []cortexpb.Sample) { metrics := make([]labels.Labels, numSeriesPerRequest) - samples := make([]cortexpbv2.Sample, numSeriesPerRequest) + samples := make([]cortexpb.Sample, numSeriesPerRequest) for i := 0; i < numSeriesPerRequest; i++ { lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}}) @@ -331,9 +330,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) { } metrics[i] = lbls.Labels() - samples[i] = cortexpbv2.Sample{ - Value: float64(i), - Timestamp: time.Now().UnixNano() / int64(time.Millisecond), + samples[i] = cortexpb.Sample{ + Value: float64(i), + TimestampMs: time.Now().UnixNano() / int64(time.Millisecond), } } @@ -346,9 +345,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) { limits.IngestionRate = 1 limits.IngestionBurstSize = 1 }, - prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) { + prepareSeries: func() ([]labels.Labels, []cortexpb.Sample) { metrics := make([]labels.Labels, numSeriesPerRequest) - samples := make([]cortexpbv2.Sample, numSeriesPerRequest) + samples := make([]cortexpb.Sample, numSeriesPerRequest) for i := 0; i < numSeriesPerRequest; i++ { lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}}) @@ -357,9 +356,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) { } metrics[i] = lbls.Labels() - samples[i] = cortexpbv2.Sample{ - Value: float64(i), - Timestamp: time.Now().UnixNano() / int64(time.Millisecond), + samples[i] = cortexpb.Sample{ + Value: float64(i), + TimestampMs: time.Now().UnixNano() / int64(time.Millisecond), } } @@ -371,9 +370,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) { prepareConfig: func(limits *validation.Limits) { limits.MaxLabelNamesPerSeries = 30 }, - prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) { + prepareSeries: func() ([]labels.Labels, []cortexpb.Sample) { metrics := make([]labels.Labels, numSeriesPerRequest) - samples := make([]cortexpbv2.Sample, numSeriesPerRequest) + samples := make([]cortexpb.Sample, numSeriesPerRequest) for i := 0; i < numSeriesPerRequest; i++ { lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}}) @@ -382,9 +381,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) { } metrics[i] = lbls.Labels() - samples[i] = cortexpbv2.Sample{ - Value: float64(i), - Timestamp: time.Now().UnixNano() / int64(time.Millisecond), + samples[i] = cortexpb.Sample{ + Value: float64(i), + TimestampMs: time.Now().UnixNano() / int64(time.Millisecond), } } @@ -396,9 +395,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) { prepareConfig: func(limits *validation.Limits) { limits.MaxLabelNameLength = 1024 }, - prepareSeries: func() ([]labels.Labels, 
[]cortexpbv2.Sample) { + prepareSeries: func() ([]labels.Labels, []cortexpb.Sample) { metrics := make([]labels.Labels, numSeriesPerRequest) - samples := make([]cortexpbv2.Sample, numSeriesPerRequest) + samples := make([]cortexpb.Sample, numSeriesPerRequest) for i := 0; i < numSeriesPerRequest; i++ { lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}}) @@ -410,9 +409,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) { lbls.Set(fmt.Sprintf("xxx_%0.2000d", 1), "xxx") metrics[i] = lbls.Labels() - samples[i] = cortexpbv2.Sample{ - Value: float64(i), - Timestamp: time.Now().UnixNano() / int64(time.Millisecond), + samples[i] = cortexpb.Sample{ + Value: float64(i), + TimestampMs: time.Now().UnixNano() / int64(time.Millisecond), } } @@ -424,9 +423,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) { prepareConfig: func(limits *validation.Limits) { limits.MaxLabelValueLength = 1024 }, - prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) { + prepareSeries: func() ([]labels.Labels, []cortexpb.Sample) { metrics := make([]labels.Labels, numSeriesPerRequest) - samples := make([]cortexpbv2.Sample, numSeriesPerRequest) + samples := make([]cortexpb.Sample, numSeriesPerRequest) for i := 0; i < numSeriesPerRequest; i++ { lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}}) @@ -438,9 +437,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) { lbls.Set("xxx", fmt.Sprintf("xxx_%0.2000d", 1)) metrics[i] = lbls.Labels() - samples[i] = cortexpbv2.Sample{ - Value: float64(i), - Timestamp: time.Now().UnixNano() / int64(time.Millisecond), + samples[i] = cortexpb.Sample{ + Value: float64(i), + TimestampMs: time.Now().UnixNano() / int64(time.Millisecond), } } @@ -452,9 +451,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) { prepareConfig: func(limits *validation.Limits) { limits.MaxLabelsSizeBytes = 1024 }, - prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) { + prepareSeries: func() ([]labels.Labels, []cortexpb.Sample) { metrics := make([]labels.Labels, numSeriesPerRequest) - samples := make([]cortexpbv2.Sample, numSeriesPerRequest) + samples := make([]cortexpb.Sample, numSeriesPerRequest) for i := 0; i < numSeriesPerRequest; i++ { lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}}) @@ -466,9 +465,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) { lbls.Set("xxx", fmt.Sprintf("xxx_%0.2000d", 1)) metrics[i] = lbls.Labels() - samples[i] = cortexpbv2.Sample{ - Value: float64(i), - Timestamp: time.Now().UnixNano() / int64(time.Millisecond), + samples[i] = cortexpb.Sample{ + Value: float64(i), + TimestampMs: time.Now().UnixNano() / int64(time.Millisecond), } } @@ -481,9 +480,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) { limits.RejectOldSamples = true limits.RejectOldSamplesMaxAge = model.Duration(time.Hour) }, - prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) { + prepareSeries: func() ([]labels.Labels, []cortexpb.Sample) { metrics := make([]labels.Labels, numSeriesPerRequest) - samples := make([]cortexpbv2.Sample, numSeriesPerRequest) + samples := make([]cortexpb.Sample, numSeriesPerRequest) for i := 0; i < numSeriesPerRequest; i++ { lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}}) @@ -492,9 +491,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) { } metrics[i] = lbls.Labels() - samples[i] = cortexpbv2.Sample{ - Value: float64(i), - Timestamp: time.Now().Add(-2*time.Hour).UnixNano() / int64(time.Millisecond), + samples[i] = 
cortexpb.Sample{ + Value: float64(i), + TimestampMs: time.Now().Add(-2*time.Hour).UnixNano() / int64(time.Millisecond), } } @@ -506,9 +505,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) { prepareConfig: func(limits *validation.Limits) { limits.CreationGracePeriod = model.Duration(time.Minute) }, - prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) { + prepareSeries: func() ([]labels.Labels, []cortexpb.Sample) { metrics := make([]labels.Labels, numSeriesPerRequest) - samples := make([]cortexpbv2.Sample, numSeriesPerRequest) + samples := make([]cortexpb.Sample, numSeriesPerRequest) for i := 0; i < numSeriesPerRequest; i++ { lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: "foo"}}) @@ -517,9 +516,9 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) { } metrics[i] = lbls.Labels() - samples[i] = cortexpbv2.Sample{ - Value: float64(i), - Timestamp: time.Now().Add(time.Hour).UnixNano() / int64(time.Millisecond), + samples[i] = cortexpb.Sample{ + Value: float64(i), + TimestampMs: time.Now().Add(time.Hour).UnixNano() / int64(time.Millisecond), } } @@ -596,7 +595,7 @@ func BenchmarkDistributorPRW2_Push(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - _, err := distributor.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API)) + _, err := distributor.PushV2(ctx, cortexpb.ToWriteRequestV2(metrics, samples, nil, nil, cortexpb.API)) if testData.expectedErr == "" && err != nil { b.Fatalf("no error expected but got %v", err) } @@ -628,7 +627,7 @@ func TestDistributorPRW2_Push(t *testing.T) { samples samplesIn histogramSamples bool metadata int - expectedResponse *cortexpbv2.WriteResponse + expectedResponse *cortexpb.WriteResponseV2 expectedError error expectedMetrics string ingesterError error @@ -857,7 +856,7 @@ func TestDistributorPRW2_Push(t *testing.T) { errFail: tc.ingesterError, }) - var request *cortexpbv2.WriteRequest + var request *cortexpb.WriteRequestV2 if !tc.histogramSamples { request = makeWriteRequestV2WithSamples(tc.samples.startTimestampMs, tc.samples.num, tc.metadata) } else { @@ -966,7 +965,7 @@ func TestDistributorPRW2_PushIngestionRateLimiter(t *testing.T) { // Push samples in multiple requests to the first distributor for _, push := range testData.pushes { - var request *cortexpbv2.WriteRequest + var request *cortexpb.WriteRequestV2 if !enableHistogram { request = makeWriteRequestV2WithSamples(0, push.samples, push.metadata) } else { @@ -1229,7 +1228,7 @@ func TestDistributorPRW2_PushInstanceLimits(t *testing.T) { d.ingestionRate.Tick() for _, push := range testData.pushes { - var request *cortexpbv2.WriteRequest + var request *cortexpb.WriteRequestV2 if enableHistogram { request = makeWriteRequestV2WithHistogram(0, push.samples, push.metadata) } else { @@ -1401,7 +1400,7 @@ func TestDistributorPRW2_PushQuery(t *testing.T) { request := makeWriteRequestV2WithSamples(0, tc.samples, tc.metadata) writeResponse, err := ds[0].PushV2(ctx, request) - assert.Equal(t, &cortexpbv2.WriteResponse{}, writeResponse) + assert.Equal(t, &cortexpb.WriteResponseV2{}, writeResponse) assert.Nil(t, err) var response model.Matrix @@ -1451,7 +1450,7 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxChunksPerQueryLimitIs // Push a number of series below the max chunks limit. Each series has 1 sample, // so expect 1 chunk per series when querying back. 
initialSeries := maxChunksLimit / 3 - var writeReqV2 *cortexpbv2.WriteRequest + var writeReqV2 *cortexpb.WriteRequestV2 if histogram { writeReqV2 = makeWriteRequestV2WithHistogram(0, initialSeries, 0) } else { @@ -1459,7 +1458,7 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxChunksPerQueryLimitIs } writeRes, err := ds[0].PushV2(ctx, writeReqV2) - assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes) + assert.Equal(t, &cortexpb.WriteResponseV2{}, writeRes) assert.Nil(t, err) allSeriesMatchers := []*labels.Matcher{ @@ -1475,13 +1474,13 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxChunksPerQueryLimitIs // Push more series to exceed the limit once we'll query back all series. for i := 0; i < maxChunksLimit; i++ { - writeReq := &cortexpbv2.WriteRequest{} + writeReq := &cortexpb.WriteRequestV2{} writeReq.Symbols = []string{"", "__name__", fmt.Sprintf("another_series_%d", i)} writeReq.Timeseries = append(writeReq.Timeseries, makeWriteRequestV2Timeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: fmt.Sprintf("another_series_%d", i)}}, 0, 0, histogram, false), ) writeRes, err := ds[0].PushV2(ctx, writeReq) - assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes) + assert.Equal(t, &cortexpb.WriteResponseV2{}, writeRes) assert.Nil(t, err) } @@ -1514,7 +1513,7 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxSeriesPerQueryLimitIs // Push a number of series below the max series limit. initialSeries := maxSeriesLimit - var writeReqV2 *cortexpbv2.WriteRequest + var writeReqV2 *cortexpb.WriteRequestV2 if histogram { writeReqV2 = makeWriteRequestV2WithHistogram(0, initialSeries, 0) } else { @@ -1522,7 +1521,7 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxSeriesPerQueryLimitIs } writeRes, err := ds[0].PushV2(ctx, writeReqV2) - assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes) + assert.Equal(t, &cortexpb.WriteResponseV2{}, writeRes) assert.Nil(t, err) allSeriesMatchers := []*labels.Matcher{ @@ -1536,14 +1535,14 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxSeriesPerQueryLimitIs assert.Len(t, queryRes.Chunkseries, initialSeries) // Push more series to exceed the limit once we'll query back all series. - writeReq := &cortexpbv2.WriteRequest{} + writeReq := &cortexpb.WriteRequestV2{} writeReq.Symbols = []string{"", "__name__", "another_series"} writeReq.Timeseries = append(writeReq.Timeseries, makeWriteRequestV2Timeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series"}}, 0, 0, histogram, false), ) writeRes, err = ds[0].PushV2(ctx, writeReq) - assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes) + assert.Equal(t, &cortexpb.WriteResponseV2{}, writeRes) assert.Nil(t, err) // Since the number of series is exceeding the limit, we expect @@ -1579,13 +1578,13 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxChunkBytesPerQueryLim labels.MustNewMatcher(labels.MatchRegexp, model.MetricNameLabel, ".+"), } // Push a single series to allow us to calculate the chunk size to calculate the limit for the test. 
- writeReq := &cortexpbv2.WriteRequest{} + writeReq := &cortexpb.WriteRequestV2{} writeReq.Symbols = []string{"", "__name__", "another_series"} writeReq.Timeseries = append(writeReq.Timeseries, makeWriteRequestV2Timeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series"}}, 0, 0, histogram, false), ) writeRes, err := ds[0].PushV2(ctx, writeReq) - assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes) + assert.Equal(t, &cortexpb.WriteResponseV2{}, writeRes) assert.Nil(t, err) chunkSizeResponse, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...) require.NoError(t, err) @@ -1598,7 +1597,7 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxChunkBytesPerQueryLim ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(0, maxBytesLimit, 0, 0)) // Push a number of series below the max chunk bytes limit. Subtract one for the series added above. - var writeReqV2 *cortexpbv2.WriteRequest + var writeReqV2 *cortexpb.WriteRequestV2 if histogram { writeReqV2 = makeWriteRequestV2WithHistogram(0, seriesToAdd-1, 0) } else { @@ -1606,7 +1605,7 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxChunkBytesPerQueryLim } writeRes, err = ds[0].PushV2(ctx, writeReqV2) - assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes) + assert.Equal(t, &cortexpb.WriteResponseV2{}, writeRes) assert.Nil(t, err) // Since the number of chunk bytes is equal to the limit (but doesn't @@ -1616,14 +1615,14 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxChunkBytesPerQueryLim assert.Len(t, queryRes.Chunkseries, seriesToAdd) // Push another series to exceed the chunk bytes limit once we'll query back all series. - writeReq = &cortexpbv2.WriteRequest{} + writeReq = &cortexpb.WriteRequestV2{} writeReq.Symbols = []string{"", "__name__", "another_series_1"} writeReq.Timeseries = append(writeReq.Timeseries, makeWriteRequestV2Timeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series_1"}}, 0, 0, histogram, false), ) writeRes, err = ds[0].PushV2(ctx, writeReq) - assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes) + assert.Equal(t, &cortexpb.WriteResponseV2{}, writeRes) assert.Nil(t, err) // Since the aggregated chunk size is exceeding the limit, we expect @@ -1659,14 +1658,14 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxDataBytesPerQueryLimi labels.MustNewMatcher(labels.MatchRegexp, model.MetricNameLabel, ".+"), } // Push a single series to allow us to calculate the label size to calculate the limit for the test. - writeReq := &cortexpbv2.WriteRequest{} + writeReq := &cortexpb.WriteRequestV2{} writeReq.Symbols = []string{"", "__name__", "another_series"} writeReq.Timeseries = append(writeReq.Timeseries, makeWriteRequestV2Timeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series"}}, 0, 0, histogram, false), ) writeRes, err := ds[0].PushV2(ctx, writeReq) - assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes) + assert.Equal(t, &cortexpb.WriteResponseV2{}, writeRes) assert.Nil(t, err) dataSizeResponse, err := ds[0].QueryStream(ctx, math.MinInt32, math.MaxInt32, allSeriesMatchers...) require.NoError(t, err) @@ -1679,7 +1678,7 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxDataBytesPerQueryLimi ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(0, 0, 0, maxBytesLimit)) // Push a number of series below the max chunk bytes limit. Subtract one for the series added above. 
- var writeReqV2 *cortexpbv2.WriteRequest + var writeReqV2 *cortexpb.WriteRequestV2 if histogram { writeReqV2 = makeWriteRequestV2WithHistogram(0, seriesToAdd-1, 0) } else { @@ -1687,7 +1686,7 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxDataBytesPerQueryLimi } writeRes, err = ds[0].PushV2(ctx, writeReqV2) - assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes) + assert.Equal(t, &cortexpb.WriteResponseV2{}, writeRes) assert.Nil(t, err) // Since the number of chunk bytes is equal to the limit (but doesn't @@ -1697,14 +1696,14 @@ func TestDistributorPRW2_QueryStream_ShouldReturnErrorIfMaxDataBytesPerQueryLimi assert.Len(t, queryRes.Chunkseries, seriesToAdd) // Push another series to exceed the chunk bytes limit once we'll query back all series. - writeReq = &cortexpbv2.WriteRequest{} + writeReq = &cortexpb.WriteRequestV2{} writeReq.Symbols = []string{"", "__name__", "another_series_1"} writeReq.Timeseries = append(writeReq.Timeseries, makeWriteRequestV2Timeseries([]cortexpb.LabelAdapter{{Name: model.MetricNameLabel, Value: "another_series_1"}}, 0, 0, histogram, false), ) writeRes, err = ds[0].PushV2(ctx, writeReq) - assert.Equal(t, &cortexpbv2.WriteResponse{}, writeRes) + assert.Equal(t, &cortexpb.WriteResponseV2{}, writeRes) assert.Nil(t, err) // Since the aggregated chunk size is exceeding the limit, we expect @@ -1824,8 +1823,8 @@ func TestDistributorPRW2_Push_ShouldGuaranteeShardingTokenConsistencyOverTheTime } } -func makeWriteRequestV2WithSamples(startTimestampMs int64, samples int, metadata int) *cortexpbv2.WriteRequest { - request := &cortexpbv2.WriteRequest{} +func makeWriteRequestV2WithSamples(startTimestampMs int64, samples int, metadata int) *cortexpb.WriteRequestV2 { + request := &cortexpb.WriteRequestV2{} st := writev2.NewSymbolTable() st.SymbolizeLabels(labels.Labels{{Name: "__name__", Value: "foo"}, {Name: "bar", Value: "baz"}}, nil) @@ -1916,7 +1915,7 @@ func TestDistributorPRW2_Push_ExemplarValidation(t *testing.T) { } tests := map[string]struct { - req *cortexpbv2.WriteRequest + req *cortexpb.WriteRequestV2 errMsg string }{ "valid exemplar": { @@ -2207,16 +2206,16 @@ func BenchmarkDistributorPRW2_MetricsForLabelMatchers(b *testing.B) { tests := map[string]struct { prepareConfig func(limits *validation.Limits) - prepareSeries func() ([]labels.Labels, []cortexpbv2.Sample) + prepareSeries func() ([]labels.Labels, []cortexpb.Sample) matchers []*labels.Matcher queryLimiter *limiter.QueryLimiter expectedErr error }{ "get series within limits": { prepareConfig: func(limits *validation.Limits) {}, - prepareSeries: func() ([]labels.Labels, []cortexpbv2.Sample) { + prepareSeries: func() ([]labels.Labels, []cortexpb.Sample) { metrics := make([]labels.Labels, numSeriesPerRequest) - samples := make([]cortexpbv2.Sample, numSeriesPerRequest) + samples := make([]cortexpb.Sample, numSeriesPerRequest) for i := 0; i < numSeriesPerRequest; i++ { lbls := labels.NewBuilder(labels.Labels{{Name: model.MetricNameLabel, Value: fmt.Sprintf("foo_%d", i)}}) @@ -2225,9 +2224,9 @@ func BenchmarkDistributorPRW2_MetricsForLabelMatchers(b *testing.B) { } metrics[i] = lbls.Labels() - samples[i] = cortexpbv2.Sample{ - Value: float64(i), - Timestamp: time.Now().UnixNano() / int64(time.Millisecond), + samples[i] = cortexpb.Sample{ + Value: float64(i), + TimestampMs: time.Now().UnixNano() / int64(time.Millisecond), } } @@ -2260,7 +2259,7 @@ func BenchmarkDistributorPRW2_MetricsForLabelMatchers(b *testing.B) { // Prepare the series to remote write before starting the benchmark. 
metrics, samples := testData.prepareSeries() - if _, err := ds[0].PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API)); err != nil { + if _, err := ds[0].PushV2(ctx, cortexpb.ToWriteRequestV2(metrics, samples, nil, nil, cortexpb.API)); err != nil { b.Fatalf("error pushing to distributor %v", err) } @@ -2351,8 +2350,8 @@ func TestDistributorPRW2_MetricsMetadata(t *testing.T) { } } -func makeWriteRequestV2WithHistogram(startTimestampMs int64, histogram int, metadata int) *cortexpbv2.WriteRequest { - request := &cortexpbv2.WriteRequest{} +func makeWriteRequestV2WithHistogram(startTimestampMs int64, histogram int, metadata int) *cortexpb.WriteRequestV2 { + request := &cortexpb.WriteRequestV2{} st := writev2.NewSymbolTable() st.SymbolizeLabels(labels.Labels{{Name: "__name__", Value: "foo"}, {Name: "bar", Value: "baz"}}, nil) @@ -2375,48 +2374,48 @@ func makeWriteRequestV2WithHistogram(startTimestampMs int64, histogram int, meta return request } -func makeMetadataV2FromST(value int, st *writev2.SymbolsTable) cortexpbv2.PreallocTimeseriesV2 { - t := cortexpbv2.PreallocTimeseriesV2{ - TimeSeries: &cortexpbv2.TimeSeries{ +func makeMetadataV2FromST(value int, st *writev2.SymbolsTable) cortexpb.PreallocTimeseriesV2 { + t := cortexpb.PreallocTimeseriesV2{ + TimeSeriesV2: &cortexpb.TimeSeriesV2{ LabelsRefs: []uint32{1, 2}, }, } helpRef := st.Symbolize(fmt.Sprintf("a help for metric_%d", value)) - t.Metadata.Type = cortexpbv2.METRIC_TYPE_COUNTER + t.Metadata.Type = cortexpb.COUNTER t.Metadata.HelpRef = helpRef return t } -func makeTimeseriesV2FromST(labels []cortexpb.LabelAdapter, st *writev2.SymbolsTable, ts int64, value int, histogram bool, metadata bool) cortexpbv2.PreallocTimeseriesV2 { +func makeTimeseriesV2FromST(labels []cortexpb.LabelAdapter, st *writev2.SymbolsTable, ts int64, value int, histogram bool, metadata bool) cortexpb.PreallocTimeseriesV2 { var helpRef uint32 if metadata { helpRef = st.Symbolize(fmt.Sprintf("a help for metric_%d", value)) } - t := cortexpbv2.PreallocTimeseriesV2{ - TimeSeries: &cortexpbv2.TimeSeries{ + t := cortexpb.PreallocTimeseriesV2{ + TimeSeriesV2: &cortexpb.TimeSeriesV2{ LabelsRefs: st.SymbolizeLabels(cortexpb.FromLabelAdaptersToLabels(labels), nil), }, } if metadata { - t.Metadata.Type = cortexpbv2.METRIC_TYPE_COUNTER + t.Metadata.Type = cortexpb.COUNTER t.Metadata.HelpRef = helpRef } if histogram { - t.Histograms = append(t.Histograms, cortexpbv2.HistogramToHistogramProto(ts, tsdbutil.GenerateTestHistogram(value))) + t.Histograms = append(t.Histograms, cortexpb.HistogramToHistogramProto(ts, tsdbutil.GenerateTestHistogram(value))) } else { - t.Samples = append(t.Samples, cortexpbv2.Sample{ - Timestamp: ts, - Value: float64(value), + t.Samples = append(t.Samples, cortexpb.Sample{ + TimestampMs: ts, + Value: float64(value), }) } return t } -func makeWriteRequestV2Timeseries(labels []cortexpb.LabelAdapter, ts int64, value int, histogram bool, metadata bool) cortexpbv2.PreallocTimeseriesV2 { +func makeWriteRequestV2Timeseries(labels []cortexpb.LabelAdapter, ts int64, value int, histogram bool, metadata bool) cortexpb.PreallocTimeseriesV2 { st := writev2.NewSymbolTable() st.SymbolizeLabels(cortexpb.FromLabelAdaptersToLabels(labels), nil) @@ -2425,29 +2424,29 @@ func makeWriteRequestV2Timeseries(labels []cortexpb.LabelAdapter, ts int64, valu helpRef = st.Symbolize(fmt.Sprintf("a help for metric_%d", value)) } - t := cortexpbv2.PreallocTimeseriesV2{ - TimeSeries: &cortexpbv2.TimeSeries{ + t := cortexpb.PreallocTimeseriesV2{ + 
TimeSeriesV2: &cortexpb.TimeSeriesV2{ LabelsRefs: st.SymbolizeLabels(cortexpb.FromLabelAdaptersToLabels(labels), nil), }, } if metadata { - t.Metadata.Type = cortexpbv2.METRIC_TYPE_COUNTER + t.Metadata.Type = cortexpb.COUNTER t.Metadata.HelpRef = helpRef } if histogram { - t.Histograms = append(t.Histograms, cortexpbv2.HistogramToHistogramProto(ts, tsdbutil.GenerateTestHistogram(value))) + t.Histograms = append(t.Histograms, cortexpb.HistogramToHistogramProto(ts, tsdbutil.GenerateTestHistogram(value))) } else { - t.Samples = append(t.Samples, cortexpbv2.Sample{ - Timestamp: ts, - Value: float64(value), + t.Samples = append(t.Samples, cortexpb.Sample{ + TimestampMs: ts, + Value: float64(value), }) } return t } -func makeWriteRequestV2Exemplar(seriesLabels []string, timestamp int64, exemplarLabels []string) *cortexpbv2.WriteRequest { +func makeWriteRequestV2Exemplar(seriesLabels []string, timestamp int64, exemplarLabels []string) *cortexpb.WriteRequestV2 { st := writev2.NewSymbolTable() for _, l := range seriesLabels { st.Symbolize(l) @@ -2456,15 +2455,15 @@ func makeWriteRequestV2Exemplar(seriesLabels []string, timestamp int64, exemplar st.Symbolize(l) } - return &cortexpbv2.WriteRequest{ + return &cortexpb.WriteRequestV2{ Symbols: st.Symbols(), - Timeseries: []cortexpbv2.PreallocTimeseriesV2{ + Timeseries: []cortexpb.PreallocTimeseriesV2{ { - TimeSeries: &cortexpbv2.TimeSeries{ - LabelsRefs: cortexpbv2.GetLabelRefsFromLabelAdapters(st.Symbols(), cortexpb.FromLabelsToLabelAdapters(labels.FromStrings(seriesLabels...))), - Exemplars: []cortexpbv2.Exemplar{ + TimeSeriesV2: &cortexpb.TimeSeriesV2{ + LabelsRefs: cortexpb.GetLabelRefsFromLabelAdapters(st.Symbols(), cortexpb.FromLabelsToLabelAdapters(labels.FromStrings(seriesLabels...))), + Exemplars: []cortexpb.ExemplarV2{ { - LabelsRefs: cortexpbv2.GetLabelRefsFromLabelAdapters(st.Symbols(), cortexpb.FromLabelsToLabelAdapters(labels.FromStrings(exemplarLabels...))), + LabelsRefs: cortexpb.GetLabelRefsFromLabelAdapters(st.Symbols(), cortexpb.FromLabelsToLabelAdapters(labels.FromStrings(exemplarLabels...))), Timestamp: timestamp, }, }, @@ -2474,47 +2473,47 @@ func makeWriteRequestV2Exemplar(seriesLabels []string, timestamp int64, exemplar } } -func mockWriteRequestV2(lbls []labels.Labels, value int64, timestamp int64, histogram bool) *cortexpbv2.WriteRequest { +func mockWriteRequestV2(lbls []labels.Labels, value int64, timestamp int64, histogram bool) *cortexpb.WriteRequestV2 { var ( - samples []cortexpbv2.Sample - histograms []cortexpbv2.Histogram + samples []cortexpb.Sample + histograms []cortexpb.Histogram ) if histogram { - histograms = make([]cortexpbv2.Histogram, len(lbls)) + histograms = make([]cortexpb.Histogram, len(lbls)) for i := range lbls { - histograms[i] = cortexpbv2.HistogramToHistogramProto(timestamp, tsdbutil.GenerateTestHistogram(int(value))) + histograms[i] = cortexpb.HistogramToHistogramProto(timestamp, tsdbutil.GenerateTestHistogram(int(value))) } } else { - samples = make([]cortexpbv2.Sample, len(lbls)) + samples = make([]cortexpb.Sample, len(lbls)) for i := range lbls { - samples[i] = cortexpbv2.Sample{ - Timestamp: timestamp, - Value: float64(value), + samples[i] = cortexpb.Sample{ + TimestampMs: timestamp, + Value: float64(value), } } } - return cortexpbv2.ToWriteRequestV2(lbls, samples, histograms, nil, cortexpbv2.API) + return cortexpb.ToWriteRequestV2(lbls, samples, histograms, nil, cortexpb.API) } -func makeWriteRequestHAV2(samples int, replica, cluster string, histogram bool) *cortexpbv2.WriteRequest { - request := 
&cortexpbv2.WriteRequest{} +func makeWriteRequestHAV2(samples int, replica, cluster string, histogram bool) *cortexpb.WriteRequestV2 { + request := &cortexpb.WriteRequestV2{} st := writev2.NewSymbolTable() for i := 0; i < samples; i++ { - ts := cortexpbv2.PreallocTimeseriesV2{ - TimeSeries: &cortexpbv2.TimeSeries{ + ts := cortexpb.PreallocTimeseriesV2{ + TimeSeriesV2: &cortexpb.TimeSeriesV2{ LabelsRefs: st.SymbolizeLabels(labels.Labels{{Name: "__name__", Value: "foo"}, {Name: "__replica__", Value: replica}, {Name: "bar", Value: "baz"}, {Name: "cluster", Value: cluster}, {Name: "sample", Value: fmt.Sprintf("%d", i)}}, nil), }, } if histogram { - ts.Histograms = []cortexpbv2.Histogram{ - cortexpbv2.HistogramToHistogramProto(int64(i), tsdbutil.GenerateTestHistogram(i)), + ts.Histograms = []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(int64(i), tsdbutil.GenerateTestHistogram(i)), } } else { - ts.Samples = []cortexpbv2.Sample{ + ts.Samples = []cortexpb.Sample{ { - Value: float64(i), - Timestamp: int64(i), + Value: float64(i), + TimestampMs: int64(i), }, } } diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 68d3353cf8..4bcb9ad0ac 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -35,7 +35,6 @@ import ( promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding" "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/cortexpbv2" "github.com/cortexproject/cortex/pkg/ha" "github.com/cortexproject/cortex/pkg/ingester" "github.com/cortexproject/cortex/pkg/ingester/client" @@ -3070,11 +3069,11 @@ func (i *mockIngester) PushPreAlloc(ctx context.Context, in *cortexpb.PreallocWr return i.Push(ctx, &in.WriteRequest, opts...) } -func (i *mockIngester) PushPreAllocV2(ctx context.Context, in *cortexpbv2.PreallocWriteRequestV2, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) { - return i.PushV2(ctx, &in.WriteRequest, opts...) +func (i *mockIngester) PushPreAllocV2(ctx context.Context, in *cortexpb.PreallocWriteRequestV2, opts ...grpc.CallOption) (*cortexpb.WriteResponseV2, error) { + return i.PushV2(ctx, &in.WriteRequestV2, opts...) } -func (i *mockIngester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) { +func (i *mockIngester) PushV2(ctx context.Context, req *cortexpb.WriteRequestV2, opts ...grpc.CallOption) (*cortexpb.WriteResponseV2, error) { i.Lock() defer i.Unlock() @@ -3108,7 +3107,7 @@ func (i *mockIngester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest, for _, s := range series.Samples { v1Sample = append(v1Sample, cortexpb.Sample{ Value: s.Value, - TimestampMs: s.Timestamp, + TimestampMs: s.TimestampMs, }) } if !ok { @@ -3126,7 +3125,7 @@ func (i *mockIngester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest, existing.Samples = append(existing.Samples, v1Sample...) 
} - if series.Metadata.Type != cortexpbv2.METRIC_TYPE_UNSPECIFIED { + if series.Metadata.Type != cortexpb.UNKNOWN { m := series.Metadata.ToV1Metadata(tsLabels.Get(model.MetricNameLabel), req.Symbols) hash = shardByMetricName(orgid, m.MetricFamilyName) set, ok := i.metadata[hash] @@ -3138,7 +3137,7 @@ func (i *mockIngester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest, } } - return &cortexpbv2.WriteResponse{}, nil + return &cortexpb.WriteResponseV2{}, nil } func (i *mockIngester) Push(ctx context.Context, req *cortexpb.WriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error) { @@ -3391,7 +3390,7 @@ func (i *noopIngester) PushPreAlloc(ctx context.Context, in *cortexpb.PreallocWr return nil, nil } -func (i *noopIngester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) { +func (i *noopIngester) PushV2(ctx context.Context, req *cortexpb.WriteRequestV2, opts ...grpc.CallOption) (*cortexpb.WriteResponseV2, error) { return nil, nil } diff --git a/pkg/distributor/distributorpb/distributor.pb.go b/pkg/distributor/distributorpb/distributor.pb.go index 5946061f32..b522283831 100644 --- a/pkg/distributor/distributorpb/distributor.pb.go +++ b/pkg/distributor/distributorpb/distributor.pb.go @@ -7,7 +7,6 @@ import ( context "context" fmt "fmt" cortexpb "github.com/cortexproject/cortex/pkg/cortexpb" - cortexpbv2 "github.com/cortexproject/cortex/pkg/cortexpbv2" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" grpc "google.golang.org/grpc" @@ -30,23 +29,22 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func init() { proto.RegisterFile("distributor.proto", fileDescriptor_c518e33639ca565d) } var fileDescriptor_c518e33639ca565d = []byte{ - // 245 bytes of a gzipped FileDescriptorProto + // 236 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0xc9, 0x2c, 0x2e, 0x29, 0xca, 0x4c, 0x2a, 0x2d, 0xc9, 0x2f, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x46, 0x12, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x8b, 0xeb, 0x83, 0x58, 0x10, 0x25, 0x52, 0x96, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xc9, 0xf9, 0x45, 0x25, 0xa9, 0x15, 0x05, 0x45, 0xf9, 0x59, 0xa9, 0xc9, 0x25, 0x50, 0x9e, 0x7e, 0x41, 0x76, 0x3a, 0x4c, 0x22, - 0x09, 0xca, 0x80, 0x6a, 0xb5, 0x25, 0x45, 0x6b, 0x99, 0x11, 0x94, 0x59, 0x66, 0x04, 0xd1, 0x6e, - 0xd4, 0xc9, 0xc8, 0xc5, 0xed, 0x82, 0x70, 0x9f, 0x90, 0x25, 0x17, 0x4b, 0x40, 0x69, 0x71, 0x86, - 0x90, 0x98, 0x1e, 0x4c, 0x8f, 0x5e, 0x78, 0x51, 0x66, 0x49, 0x6a, 0x50, 0x6a, 0x61, 0x69, 0x6a, - 0x71, 0x89, 0x94, 0x38, 0x86, 0x78, 0x71, 0x41, 0x7e, 0x5e, 0x71, 0xaa, 0x12, 0x83, 0x90, 0x3d, - 0x17, 0x1b, 0x48, 0x6b, 0x98, 0x91, 0x90, 0x84, 0x1e, 0xc2, 0x42, 0x54, 0xed, 0x92, 0x58, 0x64, - 0x60, 0x06, 0x38, 0x39, 0x5f, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0xc3, 0x87, 0x87, 0x72, - 0x8c, 0x0d, 0x8f, 0xe4, 0x18, 0x57, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, - 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x5f, 0x3c, 0x92, 0x63, 0xf8, 0xf0, 0x48, 0x8e, 0x71, 0xc2, - 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0xe2, 0x45, 0x0a, 0xde, - 0x82, 0xa4, 0x24, 0x36, 0xb0, 0xbf, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x56, 0x3b, - 0xcc, 0x89, 0x01, 0x00, 0x00, + 0x09, 0xca, 0x80, 0x68, 0x35, 0xea, 0x64, 0xe4, 0xe2, 0x76, 0x41, 0x58, 0x20, 0x64, 0xc9, 0xc5, + 0x12, 0x50, 0x5a, 0x9c, 0x21, 0x24, 0xa6, 0x07, 0x53, 0xaf, 
0x17, 0x5e, 0x94, 0x59, 0x92, 0x1a, + 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0x22, 0x25, 0x8e, 0x21, 0x5e, 0x5c, 0x90, 0x9f, 0x57, 0x9c, + 0xaa, 0xc4, 0x20, 0x64, 0xcf, 0xc5, 0x06, 0xd2, 0x1a, 0x66, 0x24, 0x24, 0x81, 0x5d, 0x73, 0x98, + 0x91, 0x94, 0x24, 0x0e, 0xed, 0x61, 0x46, 0x4a, 0x0c, 0x4e, 0xce, 0x17, 0x1e, 0xca, 0x31, 0xdc, + 0x78, 0x28, 0xc7, 0xf0, 0xe1, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f, 0xe4, 0x18, + 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x17, 0x8f, 0xe4, + 0x18, 0x3e, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, + 0xe5, 0x18, 0xa2, 0x78, 0x91, 0xc2, 0xa7, 0x20, 0x29, 0x89, 0x0d, 0xec, 0x2f, 0x63, 0x40, 0x00, + 0x00, 0x00, 0xff, 0xff, 0xce, 0xdc, 0x64, 0xe8, 0x4a, 0x01, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -62,7 +60,7 @@ const _ = grpc.SupportPackageIsVersion4 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type DistributorClient interface { Push(ctx context.Context, in *cortexpb.WriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error) - PushV2(ctx context.Context, in *cortexpbv2.WriteRequest, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) + PushV2(ctx context.Context, in *cortexpb.WriteRequestV2, opts ...grpc.CallOption) (*cortexpb.WriteResponseV2, error) } type distributorClient struct { @@ -82,8 +80,8 @@ func (c *distributorClient) Push(ctx context.Context, in *cortexpb.WriteRequest, return out, nil } -func (c *distributorClient) PushV2(ctx context.Context, in *cortexpbv2.WriteRequest, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) { - out := new(cortexpbv2.WriteResponse) +func (c *distributorClient) PushV2(ctx context.Context, in *cortexpb.WriteRequestV2, opts ...grpc.CallOption) (*cortexpb.WriteResponseV2, error) { + out := new(cortexpb.WriteResponseV2) err := c.cc.Invoke(ctx, "/distributor.Distributor/PushV2", in, out, opts...) if err != nil { return nil, err @@ -94,7 +92,7 @@ func (c *distributorClient) PushV2(ctx context.Context, in *cortexpbv2.WriteRequ // DistributorServer is the server API for Distributor service. type DistributorServer interface { Push(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) - PushV2(context.Context, *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) + PushV2(context.Context, *cortexpb.WriteRequestV2) (*cortexpb.WriteResponseV2, error) } // UnimplementedDistributorServer can be embedded to have forward compatible implementations. 
@@ -104,7 +102,7 @@ type UnimplementedDistributorServer struct { func (*UnimplementedDistributorServer) Push(ctx context.Context, req *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Push not implemented") } -func (*UnimplementedDistributorServer) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) { +func (*UnimplementedDistributorServer) PushV2(ctx context.Context, req *cortexpb.WriteRequestV2) (*cortexpb.WriteResponseV2, error) { return nil, status.Errorf(codes.Unimplemented, "method PushV2 not implemented") } @@ -131,7 +129,7 @@ func _Distributor_Push_Handler(srv interface{}, ctx context.Context, dec func(in } func _Distributor_PushV2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(cortexpbv2.WriteRequest) + in := new(cortexpb.WriteRequestV2) if err := dec(in); err != nil { return nil, err } @@ -143,7 +141,7 @@ func _Distributor_PushV2_Handler(srv interface{}, ctx context.Context, dec func( FullMethod: "/distributor.Distributor/PushV2", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DistributorServer).PushV2(ctx, req.(*cortexpbv2.WriteRequest)) + return srv.(DistributorServer).PushV2(ctx, req.(*cortexpb.WriteRequestV2)) } return interceptor(ctx, in, info, handler) } diff --git a/pkg/distributor/distributorpb/distributor.proto b/pkg/distributor/distributorpb/distributor.proto index 93234f1abb..7f594c1a5d 100644 --- a/pkg/distributor/distributorpb/distributor.proto +++ b/pkg/distributor/distributorpb/distributor.proto @@ -6,12 +6,11 @@ option go_package = "distributorpb"; import "gogoproto/gogo.proto"; import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto"; -import "github.com/cortexproject/cortex/pkg/cortexpbv2/cortexv2.proto"; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; service Distributor { rpc Push(cortexpb.WriteRequest) returns (cortexpb.WriteResponse) {}; - rpc PushV2(cortexpbv2.WriteRequest) returns (cortexpbv2.WriteResponse) {}; + rpc PushV2(cortexpb.WriteRequestV2) returns (cortexpb.WriteResponseV2) {}; } diff --git a/pkg/ingester/client/client.go b/pkg/ingester/client/client.go index b8b3ddefe9..dfeda83ecb 100644 --- a/pkg/ingester/client/client.go +++ b/pkg/ingester/client/client.go @@ -5,7 +5,6 @@ import ( "flag" "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/cortexpbv2" "github.com/cortexproject/cortex/pkg/util/grpcclient" "github.com/cortexproject/cortex/pkg/util/grpcencoding/snappyblock" @@ -44,7 +43,7 @@ type HealthAndIngesterClient interface { grpc_health_v1.HealthClient Close() error PushPreAlloc(ctx context.Context, in *cortexpb.PreallocWriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error) - PushPreAllocV2(ctx context.Context, in *cortexpbv2.PreallocWriteRequestV2, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) + PushPreAllocV2(ctx context.Context, in *cortexpb.PreallocWriteRequestV2, opts ...grpc.CallOption) (*cortexpb.WriteResponseV2, error) } type closableHealthAndIngesterClient struct { @@ -57,9 +56,9 @@ type closableHealthAndIngesterClient struct { inflightPushRequests *prometheus.GaugeVec } -func (c *closableHealthAndIngesterClient) PushPreAllocV2(ctx context.Context, in *cortexpbv2.PreallocWriteRequestV2, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) { - return 
c.handlePushRequestV2(func() (*cortexpbv2.WriteResponse, error) { - out := new(cortexpbv2.WriteResponse) +func (c *closableHealthAndIngesterClient) PushPreAllocV2(ctx context.Context, in *cortexpb.PreallocWriteRequestV2, opts ...grpc.CallOption) (*cortexpb.WriteResponseV2, error) { + return c.handlePushRequestV2(func() (*cortexpb.WriteResponseV2, error) { + out := new(cortexpb.WriteResponseV2) err := c.conn.Invoke(ctx, "/cortex.Ingester/PushV2", in, out, opts...) if err != nil { return nil, err @@ -85,13 +84,13 @@ func (c *closableHealthAndIngesterClient) Push(ctx context.Context, in *cortexpb }) } -func (c *closableHealthAndIngesterClient) PushV2(ctx context.Context, in *cortexpbv2.WriteRequest, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) { - return c.handlePushRequestV2(func() (*cortexpbv2.WriteResponse, error) { +func (c *closableHealthAndIngesterClient) PushV2(ctx context.Context, in *cortexpb.WriteRequestV2, opts ...grpc.CallOption) (*cortexpb.WriteResponseV2, error) { + return c.handlePushRequestV2(func() (*cortexpb.WriteResponseV2, error) { return c.IngesterClient.PushV2(ctx, in, opts...) }) } -func (c *closableHealthAndIngesterClient) handlePushRequestV2(mainFunc func() (*cortexpbv2.WriteResponse, error)) (*cortexpbv2.WriteResponse, error) { +func (c *closableHealthAndIngesterClient) handlePushRequestV2(mainFunc func() (*cortexpb.WriteResponseV2, error)) (*cortexpb.WriteResponseV2, error) { currentInflight := c.inflightRequests.Inc() c.inflightPushRequests.WithLabelValues(c.addr).Set(float64(currentInflight)) defer func() { diff --git a/pkg/ingester/client/ingester.pb.go b/pkg/ingester/client/ingester.pb.go index e587962642..cb808f63e2 100644 --- a/pkg/ingester/client/ingester.pb.go +++ b/pkg/ingester/client/ingester.pb.go @@ -12,7 +12,6 @@ import ( fmt "fmt" cortexpb "github.com/cortexproject/cortex/pkg/cortexpb" github_com_cortexproject_cortex_pkg_cortexpb "github.com/cortexproject/cortex/pkg/cortexpb" - cortexpbv2 "github.com/cortexproject/cortex/pkg/cortexpbv2" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" grpc "google.golang.org/grpc" @@ -1485,93 +1484,93 @@ func init() { func init() { proto.RegisterFile("ingester.proto", fileDescriptor_60f6df4f3586b478) } var fileDescriptor_60f6df4f3586b478 = []byte{ - // 1368 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x73, 0x14, 0x55, - 0x14, 0x9e, 0xce, 0x3c, 0x98, 0x39, 0xf3, 0x60, 0x72, 0x13, 0xc8, 0xa4, 0x91, 0x0e, 0x34, 0x85, - 0xa6, 0x54, 0x26, 0x30, 0x6a, 0x15, 0xa8, 0x48, 0x25, 0x10, 0x20, 0x40, 0x08, 0x74, 0x02, 0x5a, - 0x96, 0x56, 0x57, 0x67, 0xe6, 0x92, 0xb4, 0xf4, 0x63, 0xe8, 0x7b, 0x27, 0x05, 0xae, 0xb4, 0xfc, - 0x01, 0xba, 0x74, 0xeb, 0x4a, 0x7f, 0x80, 0x3f, 0x82, 0x25, 0x0b, 0x17, 0x94, 0x0b, 0x4a, 0x86, - 0x8d, 0x4b, 0xfc, 0x07, 0x56, 0xdf, 0x47, 0xbf, 0x32, 0x79, 0x60, 0x81, 0xbb, 0xbe, 0xe7, 0x7c, - 0xe7, 0xdc, 0x73, 0xbf, 0x7b, 0xce, 0x3d, 0x67, 0x06, 0x1a, 0xb6, 0xb7, 0x81, 0x09, 0xc5, 0x41, - 0xbb, 0x1f, 0xf8, 0xd4, 0x47, 0xa5, 0xae, 0x1f, 0x50, 0xfc, 0x50, 0x9d, 0xdc, 0xf0, 0x37, 0x7c, - 0x26, 0x9a, 0x0b, 0xbf, 0xb8, 0x56, 0x3d, 0xb7, 0x61, 0xd3, 0xcd, 0xc1, 0x7a, 0xbb, 0xeb, 0xbb, - 0x73, 0x1c, 0xd8, 0x0f, 0xfc, 0x6f, 0x70, 0x97, 0x8a, 0xd5, 0x5c, 0xff, 0xfe, 0x86, 0x54, 0xac, - 0x8b, 0x0f, 0x61, 0x7a, 0xfe, 0x55, 0x4c, 0xb7, 0x3a, 0xe2, 0x73, 0xab, 0xc3, 0xcd, 0xf5, 0xf3, - 0x50, 0x35, 0xb0, 0xd5, 0x33, 0xf0, 0x83, 0x01, 0x26, 0x14, 0xb5, 0xe1, 0xc0, 0x83, 0x01, 0x0e, - 0x6c, 0x4c, 0x5a, 0xca, 
0xb1, 0xfc, 0x6c, 0xb5, 0x33, 0xd9, 0x16, 0xbb, 0xdd, 0x1e, 0xe0, 0xe0, - 0x91, 0x80, 0x19, 0x12, 0xa4, 0x5f, 0x80, 0x1a, 0x37, 0x27, 0x7d, 0xdf, 0x23, 0x18, 0xcd, 0xc1, - 0x81, 0x00, 0x93, 0x81, 0x43, 0xa5, 0xfd, 0xa1, 0x8c, 0x3d, 0xc7, 0x19, 0x12, 0xa5, 0x5f, 0x87, - 0x7a, 0x4a, 0x83, 0x3e, 0x06, 0xa0, 0xb6, 0x8b, 0xc9, 0xa8, 0x20, 0xfa, 0xeb, 0xed, 0x35, 0xdb, - 0xc5, 0xab, 0x4c, 0xb7, 0x50, 0x78, 0xfc, 0x6c, 0x26, 0x67, 0x24, 0xd0, 0xfa, 0xcf, 0x0a, 0xd4, - 0x92, 0x71, 0xa2, 0xf7, 0x01, 0x11, 0x6a, 0x05, 0xd4, 0x64, 0x20, 0x6a, 0xb9, 0x7d, 0xd3, 0x0d, - 0x9d, 0x2a, 0xb3, 0x79, 0xa3, 0xc9, 0x34, 0x6b, 0x52, 0xb1, 0x4c, 0xd0, 0x2c, 0x34, 0xb1, 0xd7, - 0x4b, 0x63, 0xc7, 0x18, 0xb6, 0x81, 0xbd, 0x5e, 0x12, 0x79, 0x1a, 0xca, 0xae, 0x45, 0xbb, 0x9b, - 0x38, 0x20, 0xad, 0x7c, 0x9a, 0xa7, 0x1b, 0xd6, 0x3a, 0x76, 0x96, 0xb9, 0xd2, 0x88, 0x50, 0xfa, - 0x2f, 0x0a, 0x4c, 0x2e, 0x3e, 0xc4, 0x6e, 0xdf, 0xb1, 0x82, 0xff, 0x25, 0xc4, 0x33, 0xdb, 0x42, - 0x3c, 0x34, 0x2a, 0x44, 0x92, 0x88, 0xf1, 0x2b, 0x98, 0x60, 0xa1, 0xad, 0xd2, 0x00, 0x5b, 0x6e, - 0x74, 0x23, 0x17, 0xa0, 0xda, 0xdd, 0x1c, 0x78, 0xf7, 0x53, 0x57, 0x32, 0x25, 0x9d, 0xc5, 0x17, - 0x72, 0x31, 0x04, 0x89, 0x5b, 0x49, 0x5a, 0x5c, 0x2b, 0x94, 0xc7, 0x9a, 0x79, 0x7d, 0x15, 0x0e, - 0x65, 0x08, 0x78, 0x0d, 0x37, 0xfe, 0x87, 0x02, 0x88, 0x1d, 0xe7, 0xae, 0xe5, 0x0c, 0x30, 0x91, - 0xa4, 0x1e, 0x05, 0x70, 0x42, 0xa9, 0xe9, 0x59, 0x2e, 0x66, 0x64, 0x56, 0x8c, 0x0a, 0x93, 0xdc, - 0xb4, 0x5c, 0xbc, 0x03, 0xe7, 0x63, 0xaf, 0xc0, 0x79, 0x7e, 0x4f, 0xce, 0x0b, 0xc7, 0x94, 0x7d, - 0x70, 0x8e, 0x26, 0xa1, 0xe8, 0xd8, 0xae, 0x4d, 0x5b, 0x45, 0xe6, 0x91, 0x2f, 0xf4, 0xb3, 0x30, - 0x91, 0x3a, 0x95, 0x60, 0xea, 0x38, 0xd4, 0xf8, 0xb1, 0xb6, 0x98, 0x9c, 0x71, 0x55, 0x31, 0xaa, - 0x4e, 0x0c, 0xd5, 0x3f, 0x83, 0xe9, 0x84, 0x65, 0xe6, 0x26, 0xf7, 0x61, 0xff, 0xbb, 0x02, 0xe3, - 0x37, 0x24, 0x51, 0xe4, 0x4d, 0x27, 0x69, 0x74, 0xfa, 0x7c, 0xe2, 0xf4, 0xff, 0x81, 0x46, 0xfd, - 0x23, 0x91, 0x06, 0x22, 0x6a, 0x71, 0xde, 0x19, 0xa8, 0xc6, 0x69, 0x20, 0x8f, 0x0b, 0x51, 0x1e, - 0x10, 0xfd, 0x13, 0x68, 0xc5, 0x66, 0x19, 0xb2, 0xf6, 0x34, 0x46, 0xd0, 0xbc, 0x43, 0x70, 0xb0, - 0x4a, 0x2d, 0x2a, 0x89, 0xd2, 0xbf, 0x1f, 0x83, 0xf1, 0x84, 0x50, 0xb8, 0x3a, 0x29, 0xdb, 0x81, - 0xed, 0x7b, 0x66, 0x60, 0x51, 0x9e, 0x92, 0x8a, 0x51, 0x8f, 0xa4, 0x86, 0x45, 0x71, 0x98, 0xb5, - 0xde, 0xc0, 0x35, 0x45, 0x21, 0x84, 0x8c, 0x15, 0x8c, 0x8a, 0x37, 0x70, 0x79, 0xf6, 0x87, 0x97, - 0x60, 0xf5, 0x6d, 0x33, 0xe3, 0x29, 0xcf, 0x3c, 0x35, 0xad, 0xbe, 0xbd, 0x94, 0x72, 0xd6, 0x86, - 0x89, 0x60, 0xe0, 0xe0, 0x2c, 0xbc, 0xc0, 0xe0, 0xe3, 0xa1, 0x2a, 0x8d, 0x3f, 0x01, 0x75, 0xab, - 0x4b, 0xed, 0x2d, 0x2c, 0xf7, 0x2f, 0xb2, 0xfd, 0x6b, 0x5c, 0x28, 0x42, 0x38, 0x01, 0x75, 0xc7, - 0xb7, 0x7a, 0xb8, 0x67, 0xae, 0x3b, 0x7e, 0xf7, 0x3e, 0x69, 0x95, 0x38, 0x88, 0x0b, 0x17, 0x98, - 0x4c, 0xff, 0x1a, 0x26, 0x42, 0x0a, 0x96, 0x2e, 0xa5, 0x49, 0x98, 0x82, 0x03, 0x03, 0x82, 0x03, - 0xd3, 0xee, 0x89, 0x82, 0x2c, 0x85, 0xcb, 0xa5, 0x1e, 0x3a, 0x05, 0x85, 0x9e, 0x45, 0x2d, 0x76, - 0xe0, 0x6a, 0x67, 0x5a, 0x5e, 0xf5, 0x36, 0x1a, 0x0d, 0x06, 0xd3, 0xaf, 0x00, 0x0a, 0x55, 0x24, - 0xed, 0xfd, 0x0c, 0x14, 0x49, 0x28, 0x10, 0xef, 0xc7, 0x91, 0xa4, 0x97, 0x4c, 0x24, 0x06, 0x47, - 0xea, 0x8f, 0x15, 0xd0, 0x96, 0x31, 0x0d, 0xec, 0x2e, 0xb9, 0xec, 0x07, 0xe9, 0xcc, 0x7a, 0xc3, - 0x79, 0x7f, 0x16, 0x6a, 0x32, 0x75, 0x4d, 0x82, 0xe9, 0xee, 0x0f, 0x74, 0x55, 0x42, 0x57, 0x31, - 0x8d, 0x2b, 0xa6, 0x90, 0x7c, 0x2f, 0xae, 0xc3, 0xcc, 0x8e, 0x27, 0x11, 0x04, 0xcd, 0x42, 0xc9, - 0x65, 0x10, 0xc1, 0x50, 0x33, 0x7e, 0x61, 0xb9, 
0xa9, 0x21, 0xf4, 0xfa, 0x6d, 0x38, 0xb9, 0x83, - 0xb3, 0x4c, 0x85, 0xec, 0xdf, 0x65, 0x0b, 0x0e, 0x0b, 0x97, 0xcb, 0x98, 0x5a, 0xe1, 0x35, 0xca, - 0x82, 0x59, 0x81, 0xa9, 0x6d, 0x1a, 0xe1, 0xfe, 0x43, 0x28, 0xbb, 0x42, 0x26, 0x36, 0x68, 0x65, - 0x37, 0x88, 0x6c, 0x22, 0xa4, 0xfe, 0x8f, 0x02, 0x07, 0x33, 0x3d, 0x29, 0xbc, 0x98, 0x7b, 0x81, - 0xef, 0x9a, 0x72, 0x26, 0x8b, 0x73, 0xb0, 0x11, 0xca, 0x97, 0x84, 0x78, 0xa9, 0x97, 0x4c, 0xd2, - 0xb1, 0x54, 0x92, 0x7a, 0x50, 0x62, 0xa5, 0x2f, 0x9b, 0xe9, 0x44, 0x1c, 0x0a, 0xa3, 0xe8, 0x96, - 0x65, 0x07, 0x0b, 0xf3, 0x61, 0x7f, 0xfa, 0xf3, 0xd9, 0xcc, 0x2b, 0x8d, 0x73, 0xdc, 0x7e, 0xbe, - 0x67, 0xf5, 0x29, 0x0e, 0x0c, 0xb1, 0x0b, 0x7a, 0x0f, 0x4a, 0xbc, 0x85, 0xb6, 0x0a, 0x6c, 0xbf, - 0xba, 0xcc, 0x8d, 0x64, 0x97, 0x15, 0x10, 0xfd, 0x47, 0x05, 0x8a, 0xfc, 0xa4, 0x6f, 0x2a, 0x61, - 0x55, 0x28, 0x63, 0xaf, 0xeb, 0xf7, 0x6c, 0x6f, 0x83, 0xbd, 0x38, 0x45, 0x23, 0x5a, 0x23, 0x24, - 0xea, 0x37, 0xcc, 0xc8, 0x9a, 0x28, 0xd2, 0x79, 0xa8, 0xa7, 0x32, 0x27, 0x35, 0x31, 0x29, 0xfb, - 0x9a, 0x98, 0x4c, 0xa8, 0x25, 0x35, 0xe8, 0x24, 0x14, 0xe8, 0xa3, 0x3e, 0x7f, 0x3a, 0x1b, 0x9d, - 0x71, 0x69, 0xcd, 0xd4, 0x6b, 0x8f, 0xfa, 0xd8, 0x60, 0xea, 0x30, 0x1a, 0xd6, 0xf4, 0xf9, 0xf5, - 0xb1, 0xef, 0xb0, 0x68, 0x58, 0xc7, 0x63, 0xa1, 0x57, 0x0c, 0xbe, 0xd0, 0x7f, 0x50, 0xa0, 0x11, - 0x67, 0xca, 0x65, 0xdb, 0xc1, 0xaf, 0x23, 0x51, 0x54, 0x28, 0xdf, 0xb3, 0x1d, 0xcc, 0x62, 0xe0, - 0xdb, 0x45, 0xeb, 0x51, 0x4c, 0xbd, 0x7b, 0x0d, 0x2a, 0xd1, 0x11, 0x50, 0x05, 0x8a, 0x8b, 0xb7, - 0xef, 0xcc, 0xdf, 0x68, 0xe6, 0x50, 0x1d, 0x2a, 0x37, 0x57, 0xd6, 0x4c, 0xbe, 0x54, 0xd0, 0x41, - 0xa8, 0x1a, 0x8b, 0x57, 0x16, 0xbf, 0x30, 0x97, 0xe7, 0xd7, 0x2e, 0x5e, 0x6d, 0x8e, 0x21, 0x04, - 0x0d, 0x2e, 0xb8, 0xb9, 0x22, 0x64, 0xf9, 0xce, 0xaf, 0x65, 0x28, 0xcb, 0x18, 0xd1, 0x39, 0x28, - 0xdc, 0x1a, 0x90, 0x4d, 0x74, 0x38, 0xce, 0xd4, 0xcf, 0x03, 0x9b, 0x62, 0x51, 0x79, 0xea, 0xd4, - 0x36, 0x39, 0xaf, 0x3b, 0x3d, 0x87, 0x2e, 0x40, 0x29, 0x34, 0xbd, 0xdb, 0x41, 0x71, 0xc5, 0x6d, - 0x75, 0xd2, 0xe6, 0xd3, 0x23, 0x34, 0x91, 0x83, 0x4b, 0x50, 0x4d, 0x4c, 0x92, 0x68, 0xe4, 0x8f, - 0x08, 0xf5, 0x48, 0x4a, 0x9a, 0x7e, 0x5b, 0xf4, 0xdc, 0x69, 0x05, 0xad, 0x40, 0x83, 0xa9, 0xe4, - 0xd8, 0x48, 0xd0, 0x5b, 0xd2, 0x64, 0xd4, 0x28, 0xad, 0x1e, 0xdd, 0x41, 0x1b, 0x85, 0x75, 0x15, - 0xaa, 0x89, 0xe1, 0x08, 0xa9, 0xa9, 0x0c, 0x4c, 0x4d, 0x90, 0x71, 0x70, 0x23, 0xe6, 0x30, 0x3d, - 0x87, 0xee, 0x8a, 0x29, 0x29, 0x39, 0x66, 0xed, 0xea, 0xef, 0xf8, 0x08, 0xdd, 0x88, 0x23, 0x2f, - 0x02, 0xc4, 0x03, 0x09, 0x9a, 0x4e, 0x19, 0x25, 0x27, 0x32, 0x55, 0x1d, 0xa5, 0x8a, 0xc2, 0x5b, - 0x85, 0x66, 0x76, 0xae, 0xd9, 0xcd, 0xd9, 0xb1, 0xed, 0xaa, 0x11, 0xb1, 0x2d, 0x40, 0x25, 0xea, - 0xc9, 0x51, 0x62, 0xb4, 0xb3, 0x23, 0x90, 0xba, 0x73, 0x03, 0xd7, 0x73, 0xe8, 0x32, 0xd4, 0xe6, - 0x1d, 0x67, 0x3f, 0x6e, 0xd4, 0xa4, 0x86, 0x64, 0xfd, 0x38, 0x51, 0xdb, 0xc8, 0xf6, 0x28, 0xf4, - 0x76, 0xf4, 0x32, 0xec, 0xda, 0xdb, 0xd5, 0x77, 0xf6, 0xc4, 0x45, 0xbb, 0x7d, 0x0b, 0x47, 0x77, - 0xed, 0x88, 0xfb, 0xde, 0xf3, 0xd4, 0x1e, 0xb8, 0x11, 0xac, 0xaf, 0xc1, 0xc1, 0x4c, 0x83, 0x44, - 0x5a, 0xc6, 0x4b, 0xa6, 0xa7, 0xaa, 0x33, 0x3b, 0xea, 0xa5, 0xdf, 0x85, 0x4f, 0x9f, 0x3c, 0xd7, - 0x72, 0x4f, 0x9f, 0x6b, 0xb9, 0x97, 0xcf, 0x35, 0xe5, 0xbb, 0xa1, 0xa6, 0xfc, 0x36, 0xd4, 0x94, - 0xc7, 0x43, 0x4d, 0x79, 0x32, 0xd4, 0x94, 0xbf, 0x86, 0x9a, 0xf2, 0xf7, 0x50, 0xcb, 0xbd, 0x1c, - 0x6a, 0xca, 0x4f, 0x2f, 0xb4, 0xdc, 0x93, 0x17, 0x5a, 0xee, 0xe9, 0x0b, 0x2d, 0xf7, 0x65, 0xa9, - 0xeb, 0xd8, 0xd8, 0xa3, 0xeb, 0x25, 0xf6, 0xdf, 0xc1, 0x07, 0xff, 0x06, 
0x00, 0x00, 0xff, 0xff, - 0x63, 0x80, 0x72, 0xdc, 0xe5, 0x10, 0x00, 0x00, + // 1361 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x73, 0xd4, 0xc6, + 0x13, 0x97, 0xbc, 0x0f, 0x76, 0x7b, 0x1f, 0xac, 0xc7, 0x06, 0xaf, 0xc5, 0x1f, 0x19, 0x44, 0xf1, + 0x8f, 0x2b, 0x09, 0x6b, 0xd8, 0x24, 0x55, 0x90, 0x17, 0x65, 0x83, 0x01, 0x03, 0xc6, 0x20, 0x1b, + 0x27, 0x95, 0x4a, 0x4a, 0x25, 0xef, 0x0e, 0xb6, 0x82, 0x1e, 0x8b, 0x34, 0x4b, 0x41, 0x4e, 0x49, + 0xe5, 0x03, 0x24, 0xc7, 0x5c, 0x73, 0x4a, 0x3e, 0x40, 0x3e, 0x04, 0x47, 0x0e, 0x39, 0x50, 0x39, + 0x50, 0x61, 0xb9, 0xe4, 0x48, 0xbe, 0x41, 0x4a, 0xf3, 0xd0, 0x4a, 0xb2, 0xd6, 0x5e, 0x52, 0x38, + 0x37, 0x4d, 0xf7, 0xaf, 0x7b, 0xba, 0x7f, 0xd3, 0x33, 0xdd, 0xbb, 0x50, 0xb7, 0xdc, 0x6d, 0x1c, + 0x10, 0xec, 0xb7, 0x7a, 0xbe, 0x47, 0x3c, 0x54, 0xec, 0x78, 0x3e, 0xc1, 0x8f, 0x94, 0xe9, 0x6d, + 0x6f, 0xdb, 0xa3, 0xa2, 0x85, 0xf0, 0x8b, 0x69, 0x95, 0x0b, 0xdb, 0x16, 0xd9, 0xe9, 0x6f, 0xb5, + 0x3a, 0x9e, 0xb3, 0xc0, 0x80, 0x3d, 0xdf, 0xfb, 0x1a, 0x77, 0x08, 0x5f, 0x2d, 0xf4, 0xee, 0x6f, + 0x0b, 0xc5, 0x16, 0xff, 0x60, 0xa6, 0xda, 0x27, 0x50, 0xd1, 0xb1, 0xd9, 0xd5, 0xf1, 0x83, 0x3e, + 0x0e, 0x08, 0x6a, 0xc1, 0xa1, 0x07, 0x7d, 0xec, 0x5b, 0x38, 0x68, 0xca, 0x27, 0x72, 0xf3, 0x95, + 0xf6, 0x74, 0x8b, 0xc3, 0xef, 0xf4, 0xb1, 0xff, 0x98, 0xc3, 0x74, 0x01, 0xd2, 0x2e, 0x42, 0x95, + 0x99, 0x07, 0x3d, 0xcf, 0x0d, 0x30, 0x5a, 0x80, 0x43, 0x3e, 0x0e, 0xfa, 0x36, 0x11, 0xf6, 0x47, + 0x52, 0xf6, 0x0c, 0xa7, 0x0b, 0x94, 0x76, 0x03, 0x6a, 0x09, 0x0d, 0xfa, 0x10, 0x80, 0x58, 0x0e, + 0x0e, 0xb2, 0x82, 0xe8, 0x6d, 0xb5, 0x36, 0x2c, 0x07, 0xaf, 0x53, 0xdd, 0x52, 0xfe, 0xc9, 0xf3, + 0x39, 0x49, 0x8f, 0xa1, 0xb5, 0x9f, 0x64, 0xa8, 0xc6, 0xe3, 0x44, 0xef, 0x02, 0x0a, 0x88, 0xe9, + 0x13, 0x83, 0x82, 0x88, 0xe9, 0xf4, 0x0c, 0x27, 0x74, 0x2a, 0xcf, 0xe7, 0xf4, 0x06, 0xd5, 0x6c, + 0x08, 0xc5, 0x6a, 0x80, 0xe6, 0xa1, 0x81, 0xdd, 0x6e, 0x12, 0x3b, 0x41, 0xb1, 0x75, 0xec, 0x76, + 0xe3, 0xc8, 0xb3, 0x50, 0x72, 0x4c, 0xd2, 0xd9, 0xc1, 0x7e, 0xd0, 0xcc, 0x25, 0x79, 0xba, 0x69, + 0x6e, 0x61, 0x7b, 0x95, 0x29, 0xf5, 0x08, 0xa5, 0xfd, 0x2c, 0xc3, 0xf4, 0xf2, 0x23, 0xec, 0xf4, + 0x6c, 0xd3, 0xff, 0x4f, 0x42, 0x3c, 0xb7, 0x2b, 0xc4, 0x23, 0x59, 0x21, 0x06, 0xb1, 0x18, 0xbf, + 0x84, 0x29, 0x1a, 0xda, 0x3a, 0xf1, 0xb1, 0xe9, 0x44, 0x27, 0x72, 0x11, 0x2a, 0x9d, 0x9d, 0xbe, + 0x7b, 0x3f, 0x71, 0x24, 0x33, 0xc2, 0xd9, 0xf0, 0x40, 0x2e, 0x85, 0x20, 0x7e, 0x2a, 0x71, 0x8b, + 0xeb, 0xf9, 0xd2, 0x44, 0x23, 0xa7, 0xad, 0xc3, 0x91, 0x14, 0x01, 0x6f, 0xe0, 0xc4, 0x7f, 0x97, + 0x01, 0xd1, 0x74, 0x36, 0x4d, 0xbb, 0x8f, 0x03, 0x41, 0xea, 0x71, 0x00, 0x3b, 0x94, 0x1a, 0xae, + 0xe9, 0x60, 0x4a, 0x66, 0x59, 0x2f, 0x53, 0xc9, 0x2d, 0xd3, 0xc1, 0x23, 0x38, 0x9f, 0x78, 0x0d, + 0xce, 0x73, 0xfb, 0x72, 0x9e, 0x3f, 0x21, 0x8f, 0xc1, 0x39, 0x9a, 0x86, 0x82, 0x6d, 0x39, 0x16, + 0x69, 0x16, 0xa8, 0x47, 0xb6, 0xd0, 0xce, 0xc3, 0x54, 0x22, 0x2b, 0xce, 0xd4, 0x49, 0xa8, 0xb2, + 0xb4, 0x1e, 0x52, 0x39, 0xe5, 0xaa, 0xac, 0x57, 0xec, 0x21, 0x54, 0xfb, 0x14, 0x66, 0x63, 0x96, + 0xa9, 0x93, 0x1c, 0xc3, 0xfe, 0x37, 0x19, 0x26, 0x6f, 0x0a, 0xa2, 0x82, 0x83, 0x2e, 0xd2, 0x28, + 0xfb, 0x5c, 0x2c, 0xfb, 0x7f, 0x41, 0xa3, 0xf6, 0x01, 0x2f, 0x03, 0x1e, 0x35, 0xcf, 0x77, 0x0e, + 0x2a, 0xc3, 0x32, 0x10, 0xe9, 0x42, 0x54, 0x07, 0x81, 0xf6, 0x11, 0x34, 0x87, 0x66, 0x29, 0xb2, + 0xf6, 0x35, 0x46, 0xd0, 0xb8, 0x1b, 0x60, 0x7f, 0x9d, 0x98, 0x44, 0x10, 0xa5, 0x7d, 0x37, 0x01, + 0x93, 0x31, 0x21, 0x77, 0x75, 0x5a, 0xbc, 0xe7, 0x96, 0xe7, 0x1a, 0xbe, 0x49, 0x58, 0x49, 0xca, + 
0x7a, 0x2d, 0x92, 0xea, 0x26, 0xc1, 0x61, 0xd5, 0xba, 0x7d, 0xc7, 0xe0, 0x17, 0x21, 0x64, 0x2c, + 0xaf, 0x97, 0xdd, 0xbe, 0xc3, 0xaa, 0x3f, 0x3c, 0x04, 0xb3, 0x67, 0x19, 0x29, 0x4f, 0x39, 0xea, + 0xa9, 0x61, 0xf6, 0xac, 0x95, 0x84, 0xb3, 0x16, 0x4c, 0xf9, 0x7d, 0x1b, 0xa7, 0xe1, 0x79, 0x0a, + 0x9f, 0x0c, 0x55, 0x49, 0xfc, 0x29, 0xa8, 0x99, 0x1d, 0x62, 0x3d, 0xc4, 0x62, 0xff, 0x02, 0xdd, + 0xbf, 0xca, 0x84, 0x3c, 0x84, 0x53, 0x50, 0xb3, 0x3d, 0xb3, 0x8b, 0xbb, 0xc6, 0x96, 0xed, 0x75, + 0xee, 0x07, 0xcd, 0x22, 0x03, 0x31, 0xe1, 0x12, 0x95, 0x69, 0x5f, 0xc1, 0x54, 0x48, 0xc1, 0xca, + 0xe5, 0x24, 0x09, 0x33, 0x70, 0xa8, 0x1f, 0x60, 0xdf, 0xb0, 0xba, 0xfc, 0x42, 0x16, 0xc3, 0xe5, + 0x4a, 0x17, 0x9d, 0x81, 0x7c, 0xd7, 0x24, 0x26, 0x4d, 0xb8, 0xd2, 0x9e, 0x15, 0x47, 0xbd, 0x8b, + 0x46, 0x9d, 0xc2, 0xb4, 0xab, 0x80, 0x42, 0x55, 0x90, 0xf4, 0x7e, 0x0e, 0x0a, 0x41, 0x28, 0xe0, + 0xef, 0xc7, 0xb1, 0xb8, 0x97, 0x54, 0x24, 0x3a, 0x43, 0x6a, 0x4f, 0x64, 0x50, 0x57, 0x31, 0xf1, + 0xad, 0x4e, 0x70, 0xc5, 0xf3, 0x93, 0x95, 0x75, 0xc0, 0x75, 0x7f, 0x1e, 0xaa, 0xa2, 0x74, 0x8d, + 0x00, 0x93, 0xbd, 0x1f, 0xe8, 0x8a, 0x80, 0xae, 0x63, 0x32, 0xbc, 0x31, 0xf9, 0xf8, 0x7b, 0x71, + 0x03, 0xe6, 0x46, 0x66, 0xc2, 0x09, 0x9a, 0x87, 0xa2, 0x43, 0x21, 0x9c, 0xa1, 0xc6, 0xf0, 0x85, + 0x65, 0xa6, 0x3a, 0xd7, 0x6b, 0x77, 0xe0, 0xf4, 0x08, 0x67, 0xa9, 0x1b, 0x32, 0xbe, 0xcb, 0x26, + 0x1c, 0xe5, 0x2e, 0x57, 0x31, 0x31, 0xc3, 0x63, 0x14, 0x17, 0x66, 0x0d, 0x66, 0x76, 0x69, 0xb8, + 0xfb, 0xf7, 0xa1, 0xe4, 0x70, 0x19, 0xdf, 0xa0, 0x99, 0xde, 0x20, 0xb2, 0x89, 0x90, 0xda, 0xdf, + 0x32, 0x1c, 0x4e, 0xf5, 0xa4, 0xf0, 0x60, 0xee, 0xf9, 0x9e, 0x63, 0x88, 0xa1, 0x6a, 0x58, 0x83, + 0xf5, 0x50, 0xbe, 0xc2, 0xc5, 0x2b, 0xdd, 0x78, 0x91, 0x4e, 0x24, 0x8a, 0xd4, 0x85, 0x22, 0xbd, + 0xfa, 0xa2, 0x99, 0x4e, 0x0d, 0x43, 0xa1, 0x14, 0xdd, 0x36, 0x2d, 0x7f, 0x69, 0x31, 0xec, 0x4f, + 0x7f, 0x3c, 0x9f, 0x7b, 0xad, 0x79, 0x8c, 0xd9, 0x2f, 0x76, 0xcd, 0x1e, 0xc1, 0xbe, 0xce, 0x77, + 0x41, 0xef, 0x40, 0x91, 0xb5, 0xd0, 0x66, 0x9e, 0xee, 0x57, 0x13, 0xb5, 0x11, 0xef, 0xb2, 0x1c, + 0xa2, 0xfd, 0x20, 0x43, 0x81, 0x65, 0x7a, 0x50, 0x05, 0xab, 0x40, 0x09, 0xbb, 0x1d, 0xaf, 0x6b, + 0xb9, 0xdb, 0xf4, 0xc5, 0x29, 0xe8, 0xd1, 0x1a, 0x21, 0x7e, 0x7f, 0xc3, 0x8a, 0xac, 0xf2, 0x4b, + 0xba, 0x08, 0xb5, 0x44, 0xe5, 0x24, 0x26, 0x26, 0x79, 0xac, 0x89, 0xc9, 0x80, 0x6a, 0x5c, 0x83, + 0x4e, 0x43, 0x9e, 0x3c, 0xee, 0xb1, 0xa7, 0xb3, 0xde, 0x9e, 0x14, 0xd6, 0x54, 0xbd, 0xf1, 0xb8, + 0x87, 0x75, 0xaa, 0x0e, 0xa3, 0xa1, 0x4d, 0x9f, 0x1d, 0x1f, 0xfd, 0x0e, 0x2f, 0x0d, 0xed, 0x78, + 0x34, 0xf4, 0xb2, 0xce, 0x16, 0xda, 0xf7, 0x32, 0xd4, 0x87, 0x95, 0x72, 0xc5, 0xb2, 0xf1, 0x9b, + 0x28, 0x14, 0x05, 0x4a, 0xf7, 0x2c, 0x1b, 0xd3, 0x18, 0xd8, 0x76, 0xd1, 0x3a, 0x8b, 0xa9, 0xb7, + 0xaf, 0x43, 0x39, 0x4a, 0x01, 0x95, 0xa1, 0xb0, 0x7c, 0xe7, 0xee, 0xe2, 0xcd, 0x86, 0x84, 0x6a, + 0x50, 0xbe, 0xb5, 0xb6, 0x61, 0xb0, 0xa5, 0x8c, 0x0e, 0x43, 0x45, 0x5f, 0xbe, 0xba, 0xfc, 0xb9, + 0xb1, 0xba, 0xb8, 0x71, 0xe9, 0x5a, 0x63, 0x02, 0x21, 0xa8, 0x33, 0xc1, 0xad, 0x35, 0x2e, 0xcb, + 0xb5, 0x7f, 0x29, 0x41, 0x49, 0xc4, 0x88, 0x2e, 0x40, 0xfe, 0x76, 0x3f, 0xd8, 0x41, 0x47, 0x87, + 0x95, 0xfa, 0x99, 0x6f, 0x11, 0xcc, 0x6f, 0x9e, 0x32, 0xb3, 0x4b, 0xce, 0xee, 0x9d, 0x26, 0xa1, + 0x8b, 0x50, 0x0c, 0x4d, 0x37, 0xdb, 0xa8, 0x99, 0x6d, 0xbc, 0xd9, 0x56, 0x66, 0x47, 0x98, 0x6f, + 0xb6, 0x35, 0x09, 0x5d, 0x86, 0x4a, 0x6c, 0x92, 0x44, 0x99, 0x3f, 0x22, 0x94, 0x63, 0x09, 0x69, + 0xf2, 0x6d, 0xd1, 0xa4, 0xb3, 0x32, 0x5a, 0x83, 0x3a, 0x55, 0x89, 0xb1, 0x31, 0x40, 0xff, 0x13, + 0x26, 0x59, 0xa3, 0xb4, 
0x72, 0x7c, 0x84, 0x36, 0xca, 0xeb, 0x1a, 0x54, 0x62, 0xc3, 0x11, 0x52, + 0x12, 0x15, 0x98, 0x98, 0x20, 0x87, 0xc1, 0x65, 0xcc, 0x61, 0x9a, 0x84, 0x36, 0xf9, 0x94, 0x14, + 0x1f, 0xb3, 0xf6, 0xf4, 0x77, 0x32, 0x43, 0x97, 0x91, 0xf2, 0x32, 0xc0, 0x70, 0x20, 0x41, 0xb3, + 0x09, 0xa3, 0xf8, 0x44, 0xa6, 0x28, 0x59, 0xaa, 0x28, 0xbc, 0x75, 0x68, 0xa4, 0xe7, 0x9a, 0xbd, + 0x9c, 0x9d, 0xd8, 0xad, 0xca, 0x88, 0x6d, 0x09, 0xca, 0x51, 0x4f, 0x8e, 0x0a, 0xa3, 0x95, 0x1e, + 0x81, 0x94, 0xd1, 0x0d, 0x5c, 0x93, 0xd0, 0x15, 0xa8, 0x2e, 0xda, 0xf6, 0x38, 0x6e, 0x94, 0xb8, + 0x26, 0x48, 0xfb, 0xb1, 0xa3, 0xb6, 0x91, 0xee, 0x51, 0xe8, 0xff, 0xd1, 0xcb, 0xb0, 0x67, 0x6f, + 0x57, 0xde, 0xda, 0x17, 0x17, 0xed, 0xf6, 0x0d, 0x1c, 0xdf, 0xb3, 0x23, 0x8e, 0xbd, 0xe7, 0x99, + 0x7d, 0x70, 0x19, 0xac, 0x6f, 0xc0, 0xe1, 0x54, 0x83, 0x44, 0x6a, 0xca, 0x4b, 0xaa, 0xa7, 0x2a, + 0x73, 0x23, 0xf5, 0xc2, 0xef, 0xd2, 0xc7, 0x4f, 0x5f, 0xa8, 0xd2, 0xb3, 0x17, 0xaa, 0xf4, 0xea, + 0x85, 0x2a, 0x7f, 0x3b, 0x50, 0xe5, 0x5f, 0x07, 0xaa, 0xfc, 0x64, 0xa0, 0xca, 0x4f, 0x07, 0xaa, + 0xfc, 0xe7, 0x40, 0x95, 0xff, 0x1a, 0xa8, 0xd2, 0xab, 0x81, 0x2a, 0xff, 0xf8, 0x52, 0x95, 0x9e, + 0xbe, 0x54, 0xa5, 0x67, 0x2f, 0x55, 0xe9, 0x8b, 0x62, 0xc7, 0xb6, 0xb0, 0x4b, 0xb6, 0x8a, 0xf4, + 0xbf, 0x83, 0xf7, 0xfe, 0x09, 0x00, 0x00, 0xff, 0xff, 0x64, 0x59, 0xb6, 0x88, 0xa6, 0x10, 0x00, + 0x00, } func (x MatchType) String() string { @@ -2782,7 +2781,7 @@ const _ = grpc.SupportPackageIsVersion4 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type IngesterClient interface { Push(ctx context.Context, in *cortexpb.WriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error) - PushV2(ctx context.Context, in *cortexpbv2.WriteRequest, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) + PushV2(ctx context.Context, in *cortexpb.WriteRequestV2, opts ...grpc.CallOption) (*cortexpb.WriteResponseV2, error) QueryStream(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Ingester_QueryStreamClient, error) QueryExemplars(ctx context.Context, in *ExemplarQueryRequest, opts ...grpc.CallOption) (*ExemplarQueryResponse, error) LabelValues(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (*LabelValuesResponse, error) @@ -2813,8 +2812,8 @@ func (c *ingesterClient) Push(ctx context.Context, in *cortexpb.WriteRequest, op return out, nil } -func (c *ingesterClient) PushV2(ctx context.Context, in *cortexpbv2.WriteRequest, opts ...grpc.CallOption) (*cortexpbv2.WriteResponse, error) { - out := new(cortexpbv2.WriteResponse) +func (c *ingesterClient) PushV2(ctx context.Context, in *cortexpb.WriteRequestV2, opts ...grpc.CallOption) (*cortexpb.WriteResponseV2, error) { + out := new(cortexpb.WriteResponseV2) err := c.cc.Invoke(ctx, "/cortex.Ingester/PushV2", in, out, opts...) if err != nil { return nil, err @@ -3016,7 +3015,7 @@ func (c *ingesterClient) MetricsMetadata(ctx context.Context, in *MetricsMetadat // IngesterServer is the server API for Ingester service. 
type IngesterServer interface { Push(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) - PushV2(context.Context, *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) + PushV2(context.Context, *cortexpb.WriteRequestV2) (*cortexpb.WriteResponseV2, error) QueryStream(*QueryRequest, Ingester_QueryStreamServer) error QueryExemplars(context.Context, *ExemplarQueryRequest) (*ExemplarQueryResponse, error) LabelValues(context.Context, *LabelValuesRequest) (*LabelValuesResponse, error) @@ -3037,7 +3036,7 @@ type UnimplementedIngesterServer struct { func (*UnimplementedIngesterServer) Push(ctx context.Context, req *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Push not implemented") } -func (*UnimplementedIngesterServer) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) { +func (*UnimplementedIngesterServer) PushV2(ctx context.Context, req *cortexpb.WriteRequestV2) (*cortexpb.WriteResponseV2, error) { return nil, status.Errorf(codes.Unimplemented, "method PushV2 not implemented") } func (*UnimplementedIngesterServer) QueryStream(req *QueryRequest, srv Ingester_QueryStreamServer) error { @@ -3097,7 +3096,7 @@ func _Ingester_Push_Handler(srv interface{}, ctx context.Context, dec func(inter } func _Ingester_PushV2_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(cortexpbv2.WriteRequest) + in := new(cortexpb.WriteRequestV2) if err := dec(in); err != nil { return nil, err } @@ -3109,7 +3108,7 @@ func _Ingester_PushV2_Handler(srv interface{}, ctx context.Context, dec func(int FullMethod: "/cortex.Ingester/PushV2", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IngesterServer).PushV2(ctx, req.(*cortexpbv2.WriteRequest)) + return srv.(IngesterServer).PushV2(ctx, req.(*cortexpb.WriteRequestV2)) } return interceptor(ctx, in, info, handler) } diff --git a/pkg/ingester/client/ingester.proto b/pkg/ingester/client/ingester.proto index 01f7be6f83..eee2e82b82 100644 --- a/pkg/ingester/client/ingester.proto +++ b/pkg/ingester/client/ingester.proto @@ -7,14 +7,13 @@ option go_package = "client"; import "gogoproto/gogo.proto"; import "github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto"; -import "github.com/cortexproject/cortex/pkg/cortexpbv2/cortexv2.proto"; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; service Ingester { rpc Push(cortexpb.WriteRequest) returns (cortexpb.WriteResponse) {}; - rpc PushV2(cortexpbv2.WriteRequest) returns (cortexpbv2.WriteResponse) {}; + rpc PushV2(cortexpb.WriteRequestV2) returns (cortexpb.WriteResponseV2) {}; rpc QueryStream(QueryRequest) returns (stream QueryStreamResponse) {}; rpc QueryExemplars(ExemplarQueryRequest) returns (ExemplarQueryResponse) {}; diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index e8891c6a59..e17bfd4b3a 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -44,7 +44,6 @@ import ( "github.com/cortexproject/cortex/pkg/chunk/encoding" "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/cortexpbv2" "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/querysharding" "github.com/cortexproject/cortex/pkg/ring" @@ -1057,7 +1056,7 @@ type extendedAppender interface { storage.GetRef } -func (i *Ingester) PushV2(ctx context.Context, req 
*cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) { +func (i *Ingester) PushV2(ctx context.Context, req *cortexpb.WriteRequestV2) (*cortexpb.WriteResponseV2, error) { if err := i.checkRunning(); err != nil { return nil, err } @@ -1080,7 +1079,7 @@ func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*c // NOTE: because we use `unsafe` in deserialisation, we must not // retain anything from `req` past the call to ReuseSlice - defer cortexpbv2.ReuseSlice(req.Timeseries) + defer cortexpb.ReuseSliceV2(req.Timeseries) userID, err := tenant.TenantID(ctx) if err != nil { @@ -1108,7 +1107,7 @@ func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*c i.stoppedMtx.RUnlock() if err := db.acquireAppendLock(); err != nil { - return &cortexpbv2.WriteResponse{}, httpgrpc.Errorf(http.StatusServiceUnavailable, wrapWithUser(err, userID).Error()) + return &cortexpb.WriteResponseV2{}, httpgrpc.Errorf(http.StatusServiceUnavailable, wrapWithUser(err, userID).Error()) } defer db.releaseAppendLock() @@ -1220,7 +1219,7 @@ func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*c // If the cached reference exists, we try to use it. if ref != 0 { - if _, err = app.Append(ref, copiedLabels, s.Timestamp, s.Value); err == nil { + if _, err = app.Append(ref, copiedLabels, s.TimestampMs, s.Value); err == nil { succeededSamplesCount++ continue } @@ -1230,7 +1229,7 @@ func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*c copiedLabels = cortexpb.FromLabelAdaptersToLabelsWithCopy(seriesLabels) // Retain the reference in case there are multiple samples for the series. - if ref, err = app.Append(0, copiedLabels, s.Timestamp, s.Value); err == nil { + if ref, err = app.Append(0, copiedLabels, s.TimestampMs, s.Value); err == nil { succeededSamplesCount++ continue } @@ -1238,7 +1237,7 @@ func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*c failedSamplesCount++ - if rollback := handleAppendFailure(err, s.Timestamp, seriesLabels, copiedLabels); !rollback { + if rollback := handleAppendFailure(err, s.TimestampMs, seriesLabels, copiedLabels); !rollback { continue } // The error looks an issue on our side, so we should rollback @@ -1258,13 +1257,13 @@ func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*c ) if hp.GetCountFloat() > 0 { - fh = cortexpbv2.FloatHistogramProtoToFloatHistogram(hp) + fh = cortexpb.FloatHistogramProtoToFloatHistogram(hp) } else { - h = cortexpbv2.HistogramProtoToHistogram(hp) + h = cortexpb.HistogramProtoToHistogram(hp) } if ref != 0 { - if _, err = app.AppendHistogram(ref, copiedLabels, hp.Timestamp, h, fh); err == nil { + if _, err = app.AppendHistogram(ref, copiedLabels, hp.TimestampMs, h, fh); err == nil { succeededHistogramCount++ succeededSamplesCount++ continue @@ -1272,7 +1271,7 @@ func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*c } else { // Copy the label set because both TSDB and the active series tracker may retain it. 
copiedLabels = cortexpb.FromLabelAdaptersToLabelsWithCopy(seriesLabels) - if ref, err = app.AppendHistogram(0, copiedLabels, hp.Timestamp, h, fh); err == nil { + if ref, err = app.AppendHistogram(0, copiedLabels, hp.TimestampMs, h, fh); err == nil { succeededHistogramCount++ succeededSamplesCount++ continue @@ -1281,7 +1280,7 @@ func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*c failedSamplesCount++ - if rollback := handleAppendFailure(err, hp.Timestamp, seriesLabels, copiedLabels); !rollback { + if rollback := handleAppendFailure(err, hp.TimestampMs, seriesLabels, copiedLabels); !rollback { continue } // The error looks an issue on our side, so we should rollback @@ -1335,7 +1334,7 @@ func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*c } } - if ts.Metadata.Type != cortexpbv2.METRIC_TYPE_UNSPECIFIED { + if ts.Metadata.Type != cortexpb.UNKNOWN { metaData := ts.Metadata.ToV1Metadata(tsLabels.Get(model.MetricNameLabel), req.Symbols) if err := i.appendMetadata(userID, metaData); err == nil { succeededMetadataCount++ @@ -1399,9 +1398,9 @@ func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*c i.ingestionRate.Add(int64(succeededSamplesCount + succeededMetadataCount)) switch req.Source { - case cortexpbv2.RULE: + case cortexpb.RULE: db.ingestedRuleSamples.Add(int64(succeededSamplesCount)) - case cortexpbv2.API: + case cortexpb.API: fallthrough default: db.ingestedAPISamples.Add(int64(succeededSamplesCount)) @@ -1414,10 +1413,10 @@ func (i *Ingester) PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*c code = ve.code } level.Debug(logutil.WithContext(ctx, i.logger)).Log("msg", "partial failures to push", "totalSamples", succeededSamplesCount+failedSamplesCount, "failedSamples", failedSamplesCount, "firstPartialErr", firstPartialErr) - return &cortexpbv2.WriteResponse{}, httpgrpc.Errorf(code, wrapWithUser(firstPartialErr, userID).Error()) + return &cortexpb.WriteResponseV2{}, httpgrpc.Errorf(code, wrapWithUser(firstPartialErr, userID).Error()) } - writeResponse := &cortexpbv2.WriteResponse{ + writeResponse := &cortexpb.WriteResponseV2{ Samples: int64(succeededSamplesCount), Histograms: int64(succeededHistogramCount), Exemplars: int64(succeededExemplarsCount), diff --git a/pkg/ingester/ingester_prw2_test.go b/pkg/ingester/ingester_prw2_test.go index ac132527fb..cb1c51360c 100644 --- a/pkg/ingester/ingester_prw2_test.go +++ b/pkg/ingester/ingester_prw2_test.go @@ -42,10 +42,8 @@ import ( "golang.org/x/sync/errgroup" "google.golang.org/grpc" - "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/encoding" "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/cortexpbv2" "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/ring" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" @@ -68,12 +66,10 @@ func TestIngesterPRW2_Push(t *testing.T) { } userID := "test" - testHistogramV2 := cortexpbv2.HistogramToHistogramProto(10, tsdbutil.GenerateTestHistogram(1)) testHistogram := cortexpb.HistogramToHistogramProto(10, tsdbutil.GenerateTestHistogram(1)) - testFloatHistogramV2 := cortexpbv2.FloatHistogramToHistogramProto(11, tsdbutil.GenerateTestFloatHistogram(1)) testFloatHistogram := cortexpb.FloatHistogramToHistogramProto(11, tsdbutil.GenerateTestFloatHistogram(1)) tests := map[string]struct { - reqs []*cortexpbv2.WriteRequest + reqs []*cortexpb.WriteRequestV2 expectedErr error expectedIngested 
[]cortexpb.TimeSeries expectedMetadataIngested []*cortexpb.MetricMetadata @@ -86,13 +82,13 @@ func TestIngesterPRW2_Push(t *testing.T) { disableNativeHistogram bool }{ "should record native histogram discarded": { - reqs: []*cortexpbv2.WriteRequest{ - cortexpbv2.ToWriteRequestV2( + reqs: []*cortexpb.WriteRequestV2{ + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, - []cortexpbv2.Sample{{Value: 2, Timestamp: 10}}, - []cortexpbv2.Histogram{{Timestamp: 10}}, - []cortexpbv2.Metadata{{Type: cortexpbv2.METRIC_TYPE_GAUGE, HelpRef: 3}}, - cortexpbv2.API, + []cortexpb.Sample{{Value: 2, TimestampMs: 10}}, + []cortexpb.Histogram{{TimestampMs: 10}}, + []cortexpb.MetadataV2{{Type: cortexpb.GAUGE, HelpRef: 3}}, + cortexpb.API, "a help for metric_name_2"), }, expectedErr: nil, @@ -132,20 +128,20 @@ func TestIngesterPRW2_Push(t *testing.T) { `, }, "should succeed on valid series and metadata": { - reqs: []*cortexpbv2.WriteRequest{ - cortexpbv2.ToWriteRequestV2( + reqs: []*cortexpb.WriteRequestV2{ + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, - []cortexpbv2.Sample{{Value: 1, Timestamp: 9}}, + []cortexpb.Sample{{Value: 1, TimestampMs: 9}}, nil, - []cortexpbv2.Metadata{{HelpRef: 3, Type: cortexpbv2.METRIC_TYPE_COUNTER}}, - cortexpbv2.API, + []cortexpb.MetadataV2{{HelpRef: 3, Type: cortexpb.COUNTER}}, + cortexpb.API, "a help for metric_name_1"), - cortexpbv2.ToWriteRequestV2( + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, - []cortexpbv2.Sample{{Value: 2, Timestamp: 10}}, + []cortexpb.Sample{{Value: 2, TimestampMs: 10}}, nil, - []cortexpbv2.Metadata{{HelpRef: 3, Type: cortexpbv2.METRIC_TYPE_GAUGE}}, - cortexpbv2.API, + []cortexpb.MetadataV2{{HelpRef: 3, Type: cortexpb.GAUGE}}, + cortexpb.API, "a help for metric_name_2"), }, expectedErr: nil, @@ -202,21 +198,21 @@ func TestIngesterPRW2_Push(t *testing.T) { }, "should succeed on valid series with exemplars": { maxExemplars: 2, - reqs: []*cortexpbv2.WriteRequest{ + reqs: []*cortexpb.WriteRequestV2{ // Ingesting an exemplar requires a sample to create the series first - cortexpbv2.ToWriteRequestV2( + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, - []cortexpbv2.Sample{{Value: 1, Timestamp: 9}}, + []cortexpb.Sample{{Value: 1, TimestampMs: 9}}, nil, nil, - cortexpbv2.API), + cortexpb.API), { Symbols: []string{"", "__name__", "test", "traceID", "123", "456"}, - Timeseries: []cortexpbv2.PreallocTimeseriesV2{ + Timeseries: []cortexpb.PreallocTimeseriesV2{ { - TimeSeries: &cortexpbv2.TimeSeries{ + TimeSeriesV2: &cortexpb.TimeSeriesV2{ LabelsRefs: []uint32{1, 2}, - Exemplars: []cortexpbv2.Exemplar{ + Exemplars: []cortexpb.ExemplarV2{ { LabelsRefs: []uint32{3, 4}, Timestamp: 1000, @@ -309,19 +305,19 @@ func TestIngesterPRW2_Push(t *testing.T) { }, "successful push, active series disabled": { disableActiveSeries: true, - reqs: []*cortexpbv2.WriteRequest{ - cortexpbv2.ToWriteRequestV2( + reqs: []*cortexpb.WriteRequestV2{ + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, - []cortexpbv2.Sample{{Value: 1, Timestamp: 9}}, + []cortexpb.Sample{{Value: 1, TimestampMs: 9}}, nil, nil, - cortexpbv2.API), - cortexpbv2.ToWriteRequestV2( + cortexpb.API), + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, - []cortexpbv2.Sample{{Value: 2, Timestamp: 10}}, + []cortexpb.Sample{{Value: 2, TimestampMs: 10}}, nil, nil, - cortexpbv2.API), + cortexpb.API), }, expectedErr: nil, expectedIngested: []cortexpb.TimeSeries{ @@ -349,21 +345,21 @@ func TestIngesterPRW2_Push(t *testing.T) { `, }, "ooo disabled, should soft fail on sample 
out of order": { - reqs: []*cortexpbv2.WriteRequest{ - cortexpbv2.ToWriteRequestV2( + reqs: []*cortexpb.WriteRequestV2{ + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, - []cortexpbv2.Sample{{Value: 2, Timestamp: 10}}, + []cortexpb.Sample{{Value: 2, TimestampMs: 10}}, nil, nil, - cortexpbv2.API), - cortexpbv2.ToWriteRequestV2( + cortexpb.API), + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, - []cortexpbv2.Sample{{Value: 1, Timestamp: 9}}, - []cortexpbv2.Histogram{ - cortexpbv2.HistogramToHistogramProto(9, tsdbutil.GenerateTestHistogram(1)), + []cortexpb.Sample{{Value: 1, TimestampMs: 9}}, + []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(9, tsdbutil.GenerateTestHistogram(1)), }, nil, - cortexpbv2.API), + cortexpb.API), }, expectedErr: httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(wrappedTSDBIngestErr(storage.ErrOutOfOrderSample, model.Time(9), cortexpb.FromLabelsToLabelAdapters(metricLabels)), userID).Error()), expectedIngested: []cortexpb.TimeSeries{ @@ -410,21 +406,21 @@ func TestIngesterPRW2_Push(t *testing.T) { `, }, "ooo disabled, should soft fail on sample out of bound": { - reqs: []*cortexpbv2.WriteRequest{ - cortexpbv2.ToWriteRequestV2( + reqs: []*cortexpb.WriteRequestV2{ + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, - []cortexpbv2.Sample{{Value: 2, Timestamp: 1575043969}}, + []cortexpb.Sample{{Value: 2, TimestampMs: 1575043969}}, nil, nil, - cortexpbv2.API), - cortexpbv2.ToWriteRequestV2( + cortexpb.API), + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, - []cortexpbv2.Sample{{Value: 1, Timestamp: 1575043969 - (86400 * 1000)}}, - []cortexpbv2.Histogram{ - cortexpbv2.HistogramToHistogramProto(1575043969-(86400*1000), tsdbutil.GenerateTestHistogram(1)), + []cortexpb.Sample{{Value: 1, TimestampMs: 1575043969 - (86400 * 1000)}}, + []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(1575043969-(86400*1000), tsdbutil.GenerateTestHistogram(1)), }, nil, - cortexpbv2.API), + cortexpb.API), }, expectedErr: httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(wrappedTSDBIngestErr(storage.ErrOutOfBounds, model.Time(1575043969-(86400*1000)), cortexpb.FromLabelsToLabelAdapters(metricLabels)), userID).Error()), expectedIngested: []cortexpb.TimeSeries{ @@ -459,19 +455,19 @@ func TestIngesterPRW2_Push(t *testing.T) { `, }, "ooo enabled, should soft fail on sample too old": { - reqs: []*cortexpbv2.WriteRequest{ - cortexpbv2.ToWriteRequestV2( + reqs: []*cortexpb.WriteRequestV2{ + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, - []cortexpbv2.Sample{{Value: 2, Timestamp: 1575043969}}, + []cortexpb.Sample{{Value: 2, TimestampMs: 1575043969}}, nil, nil, - cortexpbv2.API), - cortexpbv2.ToWriteRequestV2( + cortexpb.API), + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, - []cortexpbv2.Sample{{Value: 1, Timestamp: 1575043969 - (600 * 1000)}}, + []cortexpb.Sample{{Value: 1, TimestampMs: 1575043969 - (600 * 1000)}}, nil, nil, - cortexpbv2.API), + cortexpb.API), }, oooTimeWindow: 5 * time.Minute, expectedErr: httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(wrappedTSDBIngestErr(storage.ErrTooOldSample, model.Time(1575043969-(600*1000)), cortexpb.FromLabelsToLabelAdapters(metricLabels)), userID).Error()), @@ -510,19 +506,19 @@ func TestIngesterPRW2_Push(t *testing.T) { `, }, "ooo enabled, should succeed": { - reqs: []*cortexpbv2.WriteRequest{ - cortexpbv2.ToWriteRequestV2( + reqs: []*cortexpb.WriteRequestV2{ + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, - []cortexpbv2.Sample{{Value: 2, Timestamp: 
1575043969}}, + []cortexpb.Sample{{Value: 2, TimestampMs: 1575043969}}, nil, nil, - cortexpbv2.API), - cortexpbv2.ToWriteRequestV2( + cortexpb.API), + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, - []cortexpbv2.Sample{{Value: 1, Timestamp: 1575043969 - (60 * 1000)}}, + []cortexpb.Sample{{Value: 1, TimestampMs: 1575043969 - (60 * 1000)}}, nil, nil, - cortexpbv2.API), + cortexpb.API), }, oooTimeWindow: 5 * time.Minute, expectedIngested: []cortexpb.TimeSeries{ @@ -554,19 +550,19 @@ func TestIngesterPRW2_Push(t *testing.T) { `, }, "should soft fail on two different sample values at the same timestamp": { - reqs: []*cortexpbv2.WriteRequest{ - cortexpbv2.ToWriteRequestV2( + reqs: []*cortexpb.WriteRequestV2{ + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, - []cortexpbv2.Sample{{Value: 2, Timestamp: 1575043969}}, + []cortexpb.Sample{{Value: 2, TimestampMs: 1575043969}}, nil, nil, - cortexpbv2.API), - cortexpbv2.ToWriteRequestV2( + cortexpb.API), + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, - []cortexpbv2.Sample{{Value: 1, Timestamp: 1575043969}}, + []cortexpb.Sample{{Value: 1, TimestampMs: 1575043969}}, nil, nil, - cortexpbv2.API), + cortexpb.API), }, expectedErr: httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(wrappedTSDBIngestErr(storage.NewDuplicateFloatErr(1575043969, 2, 1), model.Time(1575043969), cortexpb.FromLabelsToLabelAdapters(metricLabels)), userID).Error()), expectedIngested: []cortexpb.TimeSeries{ @@ -602,16 +598,16 @@ func TestIngesterPRW2_Push(t *testing.T) { }, "should soft fail on exemplar with unknown series": { maxExemplars: 1, - reqs: []*cortexpbv2.WriteRequest{ + reqs: []*cortexpb.WriteRequestV2{ // Ingesting an exemplar requires a sample to create the series first // This is not done here. { Symbols: []string{"", "__name__", "test", "traceID", "123"}, - Timeseries: []cortexpbv2.PreallocTimeseriesV2{ + Timeseries: []cortexpb.PreallocTimeseriesV2{ { - TimeSeries: &cortexpbv2.TimeSeries{ + TimeSeriesV2: &cortexpb.TimeSeriesV2{ LabelsRefs: []uint32{1, 2}, - Exemplars: []cortexpbv2.Exemplar{ + Exemplars: []cortexpb.ExemplarV2{ { LabelsRefs: []uint32{3, 4}, Timestamp: 1000, @@ -679,13 +675,13 @@ func TestIngesterPRW2_Push(t *testing.T) { `, }, "should succeed when only native histogram present if enabled": { - reqs: []*cortexpbv2.WriteRequest{ - cortexpbv2.ToWriteRequestV2( + reqs: []*cortexpb.WriteRequestV2{ + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, nil, - []cortexpbv2.Histogram{testHistogramV2}, + []cortexpb.Histogram{testHistogram}, nil, - cortexpbv2.API), + cortexpb.API), }, expectedErr: nil, expectedIngested: []cortexpb.TimeSeries{ @@ -729,13 +725,13 @@ func TestIngesterPRW2_Push(t *testing.T) { `, }, "should succeed when only float native histogram present if enabled": { - reqs: []*cortexpbv2.WriteRequest{ - cortexpbv2.ToWriteRequestV2( + reqs: []*cortexpb.WriteRequestV2{ + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, nil, - []cortexpbv2.Histogram{testFloatHistogramV2}, + []cortexpb.Histogram{testFloatHistogram}, nil, - cortexpbv2.API), + cortexpb.API), }, expectedErr: nil, expectedIngested: []cortexpb.TimeSeries{ @@ -779,13 +775,13 @@ func TestIngesterPRW2_Push(t *testing.T) { `, }, "should fail to ingest histogram due to OOO native histogram. 
Sample and histogram has same timestamp but sample got ingested first": { - reqs: []*cortexpbv2.WriteRequest{ - cortexpbv2.ToWriteRequestV2( + reqs: []*cortexpb.WriteRequestV2{ + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, - []cortexpbv2.Sample{{Value: 2, Timestamp: 10}}, - []cortexpbv2.Histogram{testHistogramV2}, + []cortexpb.Sample{{Value: 2, TimestampMs: 10}}, + []cortexpb.Histogram{testHistogram}, nil, - cortexpbv2.API), + cortexpb.API), }, expectedErr: nil, expectedIngested: []cortexpb.TimeSeries{ @@ -953,35 +949,35 @@ func TestIngesterPRW2_MetricLimitExceeded(t *testing.T) { userID := "1" labels1 := labels.Labels{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}} - sample1 := cortexpbv2.Sample{ - Timestamp: 0, - Value: 1, + sample1 := cortexpb.Sample{ + TimestampMs: 0, + Value: 1, } - sample2 := cortexpbv2.Sample{ - Timestamp: 1, - Value: 2, + sample2 := cortexpb.Sample{ + TimestampMs: 1, + Value: 2, } labels3 := labels.Labels{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "biz"}} - sample3 := cortexpbv2.Sample{ - Timestamp: 1, - Value: 3, + sample3 := cortexpb.Sample{ + TimestampMs: 1, + Value: 3, } // Append only one series and one metadata first, expect no error. ctx := user.InjectOrgID(context.Background(), userID) - _, err := ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2([]labels.Labels{labels1}, []cortexpbv2.Sample{sample1}, nil, []cortexpbv2.Metadata{{HelpRef: 5, Type: cortexpbv2.METRIC_TYPE_COUNTER}}, cortexpbv2.API, "a help for testmetric")) + _, err := ing.PushV2(ctx, cortexpb.ToWriteRequestV2([]labels.Labels{labels1}, []cortexpb.Sample{sample1}, nil, []cortexpb.MetadataV2{{HelpRef: 5, Type: cortexpb.COUNTER}}, cortexpb.API, "a help for testmetric")) require.NoError(t, err) testLimits := func() { // Append two series, expect series-exceeded error. - _, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2([]labels.Labels{labels1, labels3}, []cortexpbv2.Sample{sample2, sample3}, nil, nil, cortexpbv2.API)) + _, err = ing.PushV2(ctx, cortexpb.ToWriteRequestV2([]labels.Labels{labels1, labels3}, []cortexpb.Sample{sample2, sample3}, nil, nil, cortexpb.API)) httpResp, ok := httpgrpc.HTTPResponseFromError(err) require.True(t, ok, "returned error is not an httpgrpc response") assert.Equal(t, http.StatusBadRequest, int(httpResp.Code)) assert.Equal(t, wrapWithUser(makeMetricLimitError(perMetricSeriesLimit, labels3, ing.limiter.FormatError(userID, errMaxSeriesPerMetricLimitExceeded)), userID).Error(), string(httpResp.Body)) // Append two metadata for the same metric. Drop the second one, and expect no error since metadata is a best effort approach. - _, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2([]labels.Labels{labels1, labels3}, nil, nil, []cortexpbv2.Metadata{{HelpRef: 6, Type: cortexpbv2.METRIC_TYPE_COUNTER}, {HelpRef: 7, Type: cortexpbv2.METRIC_TYPE_COUNTER}}, cortexpbv2.API, "a help for testmetric", "a help for testmetric2")) + _, err = ing.PushV2(ctx, cortexpb.ToWriteRequestV2([]labels.Labels{labels1, labels3}, nil, nil, []cortexpb.MetadataV2{{HelpRef: 6, Type: cortexpb.COUNTER}, {HelpRef: 7, Type: cortexpb.COUNTER}}, cortexpb.API, "a help for testmetric", "a help for testmetric2")) require.NoError(t, err) // Read samples back via ingester queries. 
@@ -994,11 +990,11 @@ func TestIngesterPRW2_MetricLimitExceeded(t *testing.T) { Metric: cortexpb.FromLabelAdaptersToMetric(cortexpb.FromLabelsToLabelAdapters(labels1)), Values: []model.SamplePair{ { - Timestamp: model.Time(sample1.Timestamp), + Timestamp: model.Time(sample1.TimestampMs), Value: model.SampleValue(sample1.Value), }, { - Timestamp: model.Time(sample2.Timestamp), + Timestamp: model.Time(sample2.TimestampMs), Value: model.SampleValue(sample2.Value), }, }, @@ -1058,35 +1054,35 @@ func TestIngesterPRW2_UserLimitExceeded(t *testing.T) { userID := "1" // Series labels1 := labels.Labels{{Name: labels.MetricName, Value: "testmetric"}, {Name: "foo", Value: "bar"}} - sample1 := cortexpbv2.Sample{ - Timestamp: 0, - Value: 1, + sample1 := cortexpb.Sample{ + TimestampMs: 0, + Value: 1, } - sample2 := cortexpbv2.Sample{ - Timestamp: 1, - Value: 2, + sample2 := cortexpb.Sample{ + TimestampMs: 1, + Value: 2, } labels3 := labels.Labels{{Name: labels.MetricName, Value: "testmetric2"}, {Name: "foo", Value: "biz"}} - sample3 := cortexpbv2.Sample{ - Timestamp: 1, - Value: 3, + sample3 := cortexpb.Sample{ + TimestampMs: 1, + Value: 3, } // Append only one series and one metadata first, expect no error. ctx := user.InjectOrgID(context.Background(), userID) - _, err := ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2([]labels.Labels{labels1}, []cortexpbv2.Sample{sample1}, nil, []cortexpbv2.Metadata{{HelpRef: 5, Type: cortexpbv2.METRIC_TYPE_COUNTER}}, cortexpbv2.API, "a help for testmetric")) + _, err := ing.PushV2(ctx, cortexpb.ToWriteRequestV2([]labels.Labels{labels1}, []cortexpb.Sample{sample1}, nil, []cortexpb.MetadataV2{{HelpRef: 5, Type: cortexpb.COUNTER}}, cortexpb.API, "a help for testmetric")) require.NoError(t, err) testLimits := func() { // Append to two series, expect series-exceeded error. - _, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2([]labels.Labels{labels1, labels3}, []cortexpbv2.Sample{sample2, sample3}, nil, nil, cortexpbv2.API)) + _, err = ing.PushV2(ctx, cortexpb.ToWriteRequestV2([]labels.Labels{labels1, labels3}, []cortexpb.Sample{sample2, sample3}, nil, nil, cortexpb.API)) httpResp, ok := httpgrpc.HTTPResponseFromError(err) require.True(t, ok, "returned error is not an httpgrpc response") assert.Equal(t, http.StatusBadRequest, int(httpResp.Code)) assert.Equal(t, wrapWithUser(makeLimitError(perUserSeriesLimit, ing.limiter.FormatError(userID, errMaxSeriesPerUserLimitExceeded)), userID).Error(), string(httpResp.Body)) // Append two metadata, expect no error since metadata is a best effort approach. - _, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2([]labels.Labels{labels1, labels3}, nil, nil, []cortexpbv2.Metadata{{HelpRef: 7, Type: cortexpbv2.METRIC_TYPE_COUNTER}, {HelpRef: 8, Type: cortexpbv2.METRIC_TYPE_COUNTER}}, cortexpbv2.API, "a help for testmetric", "a help for testmetric2")) + _, err = ing.PushV2(ctx, cortexpb.ToWriteRequestV2([]labels.Labels{labels1, labels3}, nil, nil, []cortexpb.MetadataV2{{HelpRef: 7, Type: cortexpb.COUNTER}, {HelpRef: 8, Type: cortexpb.COUNTER}}, cortexpb.API, "a help for testmetric", "a help for testmetric2")) require.NoError(t, err) // Read samples back via ingester queries. 
@@ -1098,11 +1094,11 @@ func TestIngesterPRW2_UserLimitExceeded(t *testing.T) { Metric: cortexpb.FromLabelAdaptersToMetric(cortexpb.FromLabelsToLabelAdapters(labels1)), Values: []model.SamplePair{ { - Timestamp: model.Time(sample1.Timestamp), + Timestamp: model.Time(sample1.TimestampMs), Value: model.SampleValue(sample1.Value), }, { - Timestamp: model.Time(sample2.Timestamp), + Timestamp: model.Time(sample2.TimestampMs), Value: model.SampleValue(sample2.Value), }, }, @@ -1176,7 +1172,7 @@ func TestIngesterPRW2_PerLabelsetLimitExceeded(t *testing.T) { }) ctx := user.InjectOrgID(context.Background(), userID) - samples := []cortexpbv2.Sample{{Value: 2, Timestamp: 10}} + samples := []cortexpb.Sample{{Value: 2, TimestampMs: 10}} // Create first series within the limits for _, set := range limits.LimitsPerLabelSet { @@ -1185,8 +1181,8 @@ func TestIngesterPRW2_PerLabelsetLimitExceeded(t *testing.T) { lbls = append(lbls, lbl.Name, lbl.Value) } for i := 0; i < set.Limits.MaxSeries; i++ { - _, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2( - []labels.Labels{labels.FromStrings(append(lbls, "extraLabel", fmt.Sprintf("extraValue%v", i))...)}, samples, nil, nil, cortexpbv2.API)) + _, err = ing.PushV2(ctx, cortexpb.ToWriteRequestV2( + []labels.Labels{labels.FromStrings(append(lbls, "extraLabel", fmt.Sprintf("extraValue%v", i))...)}, samples, nil, nil, cortexpb.API)) require.NoError(t, err) } } @@ -1209,8 +1205,8 @@ func TestIngesterPRW2_PerLabelsetLimitExceeded(t *testing.T) { for _, lbl := range set.LabelSet { lbls = append(lbls, lbl.Name, lbl.Value) } - _, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2( - []labels.Labels{labels.FromStrings(append(lbls, "newLabel", "newValue")...)}, samples, nil, nil, cortexpbv2.API)) + _, err = ing.PushV2(ctx, cortexpb.ToWriteRequestV2( + []labels.Labels{labels.FromStrings(append(lbls, "newLabel", "newValue")...)}, samples, nil, nil, cortexpb.API)) httpResp, ok := httpgrpc.HTTPResponseFromError(err) require.True(t, ok, "returned error is not an httpgrpc response") assert.Equal(t, http.StatusBadRequest, int(httpResp.Code)) @@ -1288,22 +1284,22 @@ func TestIngesterPRW2_PerLabelsetLimitExceeded(t *testing.T) { // Adding 5 metrics with only 1 label for i := 0; i < 5; i++ { lbls := []string{labels.MetricName, "metric_name", "comp1", "compValue1"} - _, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2( - []labels.Labels{labels.FromStrings(append(lbls, "extraLabel", fmt.Sprintf("extraValue%v", i))...)}, samples, nil, nil, cortexpbv2.API)) + _, err = ing.PushV2(ctx, cortexpb.ToWriteRequestV2( + []labels.Labels{labels.FromStrings(append(lbls, "extraLabel", fmt.Sprintf("extraValue%v", i))...)}, samples, nil, nil, cortexpb.API)) require.NoError(t, err) } // Adding 2 metrics with both labels (still below the limit) lbls := []string{labels.MetricName, "metric_name", "comp1", "compValue1", "comp2", "compValue2"} for i := 0; i < 2; i++ { - _, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2( - []labels.Labels{labels.FromStrings(append(lbls, "extraLabel", fmt.Sprintf("extraValue%v", i))...)}, samples, nil, nil, cortexpbv2.API)) + _, err = ing.PushV2(ctx, cortexpb.ToWriteRequestV2( + []labels.Labels{labels.FromStrings(append(lbls, "extraLabel", fmt.Sprintf("extraValue%v", i))...)}, samples, nil, nil, cortexpb.API)) require.NoError(t, err) } // Now we should hit the limit as we already have 2 metrics with comp1=compValue1, comp2=compValue2 - _, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2( - []labels.Labels{labels.FromStrings(append(lbls, "newLabel", "newValue")...)}, 
samples, nil, nil, cortexpbv2.API)) + _, err = ing.PushV2(ctx, cortexpb.ToWriteRequestV2( + []labels.Labels{labels.FromStrings(append(lbls, "newLabel", "newValue")...)}, samples, nil, nil, cortexpb.API)) httpResp, ok := httpgrpc.HTTPResponseFromError(err) require.True(t, ok, "returned error is not an httpgrpc response") assert.Equal(t, http.StatusBadRequest, int(httpResp.Code)) @@ -1348,12 +1344,12 @@ func TestIngesterPRW2_PerLabelsetLimitExceeded(t *testing.T) { tenantLimits.setLimits(userID, &limits) lbls = []string{labels.MetricName, "metric_name", "comp2", "compValue2"} - _, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2( - []labels.Labels{labels.FromStrings(append(lbls, "extraLabel", "extraValueUpdate")...)}, samples, nil, nil, cortexpbv2.API)) + _, err = ing.PushV2(ctx, cortexpb.ToWriteRequestV2( + []labels.Labels{labels.FromStrings(append(lbls, "extraLabel", "extraValueUpdate")...)}, samples, nil, nil, cortexpb.API)) require.NoError(t, err) - _, err = ing.PushV2(ctx, cortexpbv2.ToWriteRequestV2( - []labels.Labels{labels.FromStrings(append(lbls, "extraLabel", "extraValueUpdate2")...)}, samples, nil, nil, cortexpbv2.API)) + _, err = ing.PushV2(ctx, cortexpb.ToWriteRequestV2( + []labels.Labels{labels.FromStrings(append(lbls, "extraLabel", "extraValueUpdate2")...)}, samples, nil, nil, cortexpb.API)) httpResp, ok = httpgrpc.HTTPResponseFromError(err) require.True(t, ok, "returned error is not an httpgrpc response") assert.Equal(t, http.StatusBadRequest, int(httpResp.Code)) @@ -1424,13 +1420,13 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) { metricLabels := cortexpb.FromLabelAdaptersToLabels(metricLabelAdapters) for _, tc := range []struct { name string - histograms []cortexpbv2.Histogram + histograms []cortexpb.Histogram expectedErr error }{ { name: "rejects histogram with NaN observations that has its Count (2) lower than the actual total of buckets (2 + 1)", - histograms: []cortexpbv2.Histogram{ - cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{ + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ ZeroCount: 2, Count: 2, Sum: math.NaN(), @@ -1442,8 +1438,8 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) { }, { name: "rejects histogram without NaN observations that has its Count (4) higher than the actual total of buckets (2 + 1)", - histograms: []cortexpbv2.Histogram{ - cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{ + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ ZeroCount: 2, Count: 4, Sum: 333, @@ -1455,8 +1451,8 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) { }, { name: "rejects histogram that has too few negative buckets", - histograms: []cortexpbv2.Histogram{ - cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{ + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}}, NegativeBuckets: []int64{}, }), @@ -1465,8 +1461,8 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) { }, { name: "rejects histogram that has too few positive buckets", - histograms: []cortexpbv2.Histogram{ - cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{ + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, PositiveBuckets: []int64{}, }), @@ -1475,8 +1471,8 @@ func 
TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) { }, { name: "rejects histogram that has too many negative buckets", - histograms: []cortexpbv2.Histogram{ - cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{ + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ NegativeSpans: []histogram.Span{{Offset: 0, Length: 1}}, NegativeBuckets: []int64{1, 2}, }), @@ -1485,8 +1481,8 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) { }, { name: "rejects histogram that has too many positive buckets", - histograms: []cortexpbv2.Histogram{ - cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{ + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ PositiveSpans: []histogram.Span{{Offset: 0, Length: 1}}, PositiveBuckets: []int64{1, 2}, }), @@ -1495,8 +1491,8 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) { }, { name: "rejects a histogram that has a negative span with a negative offset", - histograms: []cortexpbv2.Histogram{ - cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{ + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ NegativeSpans: []histogram.Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}}, NegativeBuckets: []int64{1, 2}, }), @@ -1505,8 +1501,8 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) { }, { name: "rejects a histogram that has a positive span with a negative offset", - histograms: []cortexpbv2.Histogram{ - cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{ + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ PositiveSpans: []histogram.Span{{Offset: -1, Length: 1}, {Offset: -1, Length: 1}}, PositiveBuckets: []int64{1, 2}, }), @@ -1515,8 +1511,8 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) { }, { name: "rejects a histogram that has a negative span with a negative count", - histograms: []cortexpbv2.Histogram{ - cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{ + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ NegativeSpans: []histogram.Span{{Offset: -1, Length: 1}}, NegativeBuckets: []int64{-1}, }), @@ -1525,8 +1521,8 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) { }, { name: "rejects a histogram that has a positive span with a negative count", - histograms: []cortexpbv2.Histogram{ - cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{ + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ PositiveSpans: []histogram.Span{{Offset: -1, Length: 1}}, PositiveBuckets: []int64{-1}, }), @@ -1535,8 +1531,8 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) { }, { name: "rejects a histogram that has a lower count than count in buckets", - histograms: []cortexpbv2.Histogram{ - cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{ + histograms: []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ Count: 0, NegativeSpans: []histogram.Span{{Offset: -1, Length: 1}}, PositiveSpans: []histogram.Span{{Offset: -1, Length: 1}}, @@ -1548,8 +1544,8 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) { }, { name: "rejects a histogram that doesn't count the zero bucket in its count", - histograms: []cortexpbv2.Histogram{ - cortexpbv2.HistogramToHistogramProto(10, &histogram.Histogram{ + histograms: 
[]cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(10, &histogram.Histogram{ Count: 2, ZeroCount: 1, NegativeSpans: []histogram.Span{{Offset: -1, Length: 1}}, @@ -1581,7 +1577,7 @@ func TestIngesterPRW2_PushNativeHistogramErrors(t *testing.T) { return i.lifecycler.GetState() }) - req := cortexpbv2.ToWriteRequestV2([]labels.Labels{metricLabels}, nil, tc.histograms, nil, cortexpbv2.API) + req := cortexpb.ToWriteRequestV2([]labels.Labels{metricLabels}, nil, tc.histograms, nil, cortexpb.API) // Push timeseries _, err = i.PushV2(ctx, req) assert.Equal(t, httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(wrappedTSDBIngestErr(tc.expectedErr, model.Time(10), metricLabelAdapters), userID).Error()), err) @@ -1622,19 +1618,19 @@ func TestIngesterPRW2_Push_ShouldCorrectlyTrackMetricsInMultiTenantScenario(t *t // Push timeseries for each user for _, userID := range []string{"test-1", "test-2"} { - reqs := []*cortexpbv2.WriteRequest{ - cortexpbv2.ToWriteRequestV2( + reqs := []*cortexpb.WriteRequestV2{ + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, - []cortexpbv2.Sample{{Value: 1, Timestamp: 9}}, + []cortexpb.Sample{{Value: 1, TimestampMs: 9}}, nil, nil, - cortexpbv2.API), - cortexpbv2.ToWriteRequestV2( + cortexpb.API), + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, - []cortexpbv2.Sample{{Value: 2, Timestamp: 10}}, + []cortexpb.Sample{{Value: 2, TimestampMs: 10}}, nil, nil, - cortexpbv2.API), + cortexpb.API), } for _, req := range reqs { @@ -1706,19 +1702,19 @@ func TestIngesterPRW2_Push_DecreaseInactiveSeries(t *testing.T) { // Push timeseries for each user for _, userID := range []string{"test-1", "test-2"} { - reqs := []*cortexpbv2.WriteRequest{ - cortexpbv2.ToWriteRequestV2( + reqs := []*cortexpb.WriteRequestV2{ + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, - []cortexpbv2.Sample{{Value: 1, Timestamp: 9}}, + []cortexpb.Sample{{Value: 1, TimestampMs: 9}}, nil, nil, - cortexpbv2.API), - cortexpbv2.ToWriteRequestV2( + cortexpb.API), + cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, - []cortexpbv2.Sample{{Value: 2, Timestamp: 10}}, + []cortexpb.Sample{{Value: 2, TimestampMs: 10}}, nil, nil, - cortexpbv2.API), + cortexpb.API), } for _, req := range reqs { @@ -1781,12 +1777,12 @@ func benchmarkIngesterPRW2Push(b *testing.B, limits validation.Limits, errorsExp metricLabels := cortexpb.FromLabelAdaptersToLabels(metricLabelAdapters) startTime := util.TimeToMillis(time.Now()) - currTimeReq := cortexpbv2.ToWriteRequestV2( + currTimeReq := cortexpb.ToWriteRequestV2( []labels.Labels{metricLabels}, - []cortexpbv2.Sample{{Value: 1, Timestamp: startTime}}, + []cortexpb.Sample{{Value: 1, TimestampMs: startTime}}, nil, nil, - cortexpbv2.API) + cortexpb.API) _, err = ingester.PushV2(ctx, currTimeReq) require.NoError(b, err) @@ -1795,16 +1791,16 @@ func benchmarkIngesterPRW2Push(b *testing.B, limits validation.Limits, errorsExp samples = 10 ) - allLabels, allSamples := benchmarkDataV2(series) + allLabels, allSamples := benchmarkData(series) b.ResetTimer() for iter := 0; iter < b.N; iter++ { // Bump the timestamp on each of our test samples each time round the loop for j := 0; j < samples; j++ { for i := range allSamples { - allSamples[i].Timestamp = startTime + int64(iter*samples+j+1) + allSamples[i].TimestampMs = startTime + int64(iter*samples+j+1) } - _, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(allLabels, allSamples, nil, nil, cortexpbv2.API)) + _, err := ingester.PushV2(ctx, cortexpb.ToWriteRequestV2(allLabels, allSamples, nil, nil, 
cortexpb.API)) if !errorsExpected { require.NoError(b, err) } @@ -1846,26 +1842,26 @@ func Benchmark_IngesterPRW2_PushOnError(b *testing.B) { // If this returns false, test is skipped. prepareConfig func(limits *validation.Limits, instanceLimits *InstanceLimits) bool beforeBenchmark func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) - runBenchmark func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) + runBenchmark func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) }{ "out of bound samples": { prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool { return true }, beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) { // Push a single time series to set the TSDB min time. - currTimeReq := cortexpbv2.ToWriteRequestV2( + currTimeReq := cortexpb.ToWriteRequestV2( []labels.Labels{{{Name: labels.MetricName, Value: metricName}}}, - []cortexpbv2.Sample{{Value: 1, Timestamp: util.TimeToMillis(time.Now())}}, + []cortexpb.Sample{{Value: 1, TimestampMs: util.TimeToMillis(time.Now())}}, nil, nil, - cortexpbv2.API) + cortexpb.API) _, err := ingester.PushV2(ctx, currTimeReq) require.NoError(b, err) }, - runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) { + runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) { expectedErr := storage.ErrOutOfBounds.Error() // Push out of bound samples. for n := 0; n < b.N; n++ { - _, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API)) // nolint:errcheck + _, err := ingester.PushV2(ctx, cortexpb.ToWriteRequestV2(metrics, samples, nil, nil, cortexpb.API)) // nolint:errcheck verifyErrorString(b, err, expectedErr) } @@ -1876,18 +1872,18 @@ func Benchmark_IngesterPRW2_PushOnError(b *testing.B) { beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) { // For each series, push a single sample with a timestamp greater than next pushes. for i := 0; i < numSeriesPerRequest; i++ { - currTimeReq := cortexpbv2.ToWriteRequestV2( + currTimeReq := cortexpb.ToWriteRequestV2( []labels.Labels{{{Name: labels.MetricName, Value: metricName}, {Name: "cardinality", Value: strconv.Itoa(i)}}}, - []cortexpbv2.Sample{{Value: 1, Timestamp: sampleTimestamp + 1}}, + []cortexpb.Sample{{Value: 1, TimestampMs: sampleTimestamp + 1}}, nil, nil, - cortexpbv2.API) + cortexpb.API) _, err := ingester.PushV2(ctx, currTimeReq) require.NoError(b, err) } }, - runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) { + runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) { expectedErr := storage.ErrOutOfOrderSample.Error() st := writev2.NewSymbolTable() @@ -1896,7 +1892,7 @@ func Benchmark_IngesterPRW2_PushOnError(b *testing.B) { } // Push out of order samples. 
for n := 0; n < b.N; n++ { - _, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API)) // nolint:errcheck + _, err := ingester.PushV2(ctx, cortexpb.ToWriteRequestV2(metrics, samples, nil, nil, cortexpb.API)) // nolint:errcheck verifyErrorString(b, err, expectedErr) } @@ -1909,16 +1905,16 @@ func Benchmark_IngesterPRW2_PushOnError(b *testing.B) { }, beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) { // Push a series with a metric name different than the one used during the benchmark. - currTimeReq := cortexpbv2.ToWriteRequestV2( + currTimeReq := cortexpb.ToWriteRequestV2( []labels.Labels{labels.FromStrings(labels.MetricName, "another")}, - []cortexpbv2.Sample{{Value: 1, Timestamp: sampleTimestamp + 1}}, + []cortexpb.Sample{{Value: 1, TimestampMs: sampleTimestamp + 1}}, nil, nil, - cortexpbv2.API) + cortexpb.API) _, err := ingester.PushV2(ctx, currTimeReq) require.NoError(b, err) }, - runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) { + runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) { // Push series with a different name than the one already pushed. st := writev2.NewSymbolTable() @@ -1926,7 +1922,7 @@ func Benchmark_IngesterPRW2_PushOnError(b *testing.B) { st.SymbolizeLabels(lbs, nil) } for n := 0; n < b.N; n++ { - _, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API)) // nolint:errcheck + _, err := ingester.PushV2(ctx, cortexpb.ToWriteRequestV2(metrics, samples, nil, nil, cortexpb.API)) // nolint:errcheck verifyErrorString(b, err, "per-user series limit") } }, @@ -1938,23 +1934,23 @@ func Benchmark_IngesterPRW2_PushOnError(b *testing.B) { }, beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) { // Push a series with the same metric name but different labels than the one used during the benchmark. - currTimeReq := cortexpbv2.ToWriteRequestV2( + currTimeReq := cortexpb.ToWriteRequestV2( []labels.Labels{labels.FromStrings(labels.MetricName, metricName, "cardinality", "another")}, - []cortexpbv2.Sample{{Value: 1, Timestamp: sampleTimestamp + 1}}, + []cortexpb.Sample{{Value: 1, TimestampMs: sampleTimestamp + 1}}, nil, nil, - cortexpbv2.API) + cortexpb.API) _, err := ingester.PushV2(ctx, currTimeReq) require.NoError(b, err) }, - runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) { + runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) { st := writev2.NewSymbolTable() for _, lbs := range metrics { st.SymbolizeLabels(lbs, nil) } // Push series with different labels than the one already pushed. 
for n := 0; n < b.N; n++ { - _, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API)) // nolint:errcheck + _, err := ingester.PushV2(ctx, cortexpb.ToWriteRequestV2(metrics, samples, nil, nil, cortexpb.API)) // nolint:errcheck verifyErrorString(b, err, "per-metric series limit") } }, @@ -1974,14 +1970,14 @@ func Benchmark_IngesterPRW2_PushOnError(b *testing.B) { ingester.ingestionRate.Tick() }, - runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) { + runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) { st := writev2.NewSymbolTable() for _, lbs := range metrics { st.SymbolizeLabels(lbs, nil) } // Push series with different labels than the one already pushed. for n := 0; n < b.N; n++ { - _, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API)) + _, err := ingester.PushV2(ctx, cortexpb.ToWriteRequestV2(metrics, samples, nil, nil, cortexpb.API)) verifyErrorString(b, err, "push rate reached") } }, @@ -2000,14 +1996,14 @@ func Benchmark_IngesterPRW2_PushOnError(b *testing.B) { _, err := ingester.PushV2(ctx, generateSamplesForLabelV2(labels.FromStrings(labels.MetricName, "test"), 10000)) require.NoError(b, err) }, - runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) { + runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) { st := writev2.NewSymbolTable() for _, lbs := range metrics { st.SymbolizeLabels(lbs, nil) } // Push series with different labels than the one already pushed. for n := 0; n < b.N; n++ { - _, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API)) + _, err := ingester.PushV2(ctx, cortexpb.ToWriteRequestV2(metrics, samples, nil, nil, cortexpb.API)) verifyErrorString(b, err, "max tenants limit reached") } }, @@ -2024,13 +2020,13 @@ func Benchmark_IngesterPRW2_PushOnError(b *testing.B) { _, err := ingester.PushV2(ctx, generateSamplesForLabelV2(labels.FromStrings(labels.MetricName, "test"), 10000)) require.NoError(b, err) }, - runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) { + runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) { st := writev2.NewSymbolTable() for _, lbs := range metrics { st.SymbolizeLabels(lbs, nil) } for n := 0; n < b.N; n++ { - _, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API)) + _, err := ingester.PushV2(ctx, cortexpb.ToWriteRequestV2(metrics, samples, nil, nil, cortexpb.API)) verifyErrorString(b, err, "max series limit reached") } }, @@ -2046,13 +2042,13 @@ func Benchmark_IngesterPRW2_PushOnError(b *testing.B) { beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) { ingester.inflightPushRequests.Inc() }, - runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpbv2.Sample) { + runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []cortexpb.Sample) { st := writev2.NewSymbolTable() for _, lbs := range metrics { st.SymbolizeLabels(lbs, nil) } for n := 0; n < b.N; n++ { - _, err := ingester.PushV2(ctx, cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API)) + _, err := ingester.PushV2(ctx, cortexpb.ToWriteRequestV2(metrics, samples, nil, 
nil, cortexpb.API)) verifyErrorString(b, err, "too many inflight push requests") } }, @@ -2100,10 +2096,10 @@ func Benchmark_IngesterPRW2_PushOnError(b *testing.B) { // Prepare the request. metrics := make([]labels.Labels, 0, scenario.numSeriesPerRequest) - samples := make([]cortexpbv2.Sample, 0, scenario.numSeriesPerRequest) + samples := make([]cortexpb.Sample, 0, scenario.numSeriesPerRequest) for i := 0; i < scenario.numSeriesPerRequest; i++ { metrics = append(metrics, labels.Labels{{Name: labels.MetricName, Value: metricName}, {Name: "cardinality", Value: strconv.Itoa(i)}}) - samples = append(samples, cortexpbv2.Sample{Value: float64(i), Timestamp: sampleTimestamp}) + samples = append(samples, cortexpb.Sample{Value: float64(i), TimestampMs: sampleTimestamp}) } // Run the benchmark. @@ -2459,7 +2455,7 @@ func TestIngesterPRW2_Push_ShouldNotCreateTSDBIfNotInActiveState(t *testing.T) { // Mock request userID := "test" ctx := user.InjectOrgID(context.Background(), userID) - req := &cortexpbv2.WriteRequest{} + req := &cortexpb.WriteRequestV2{} res, err := i.PushV2(ctx, req) assert.Equal(t, wrapWithUser(fmt.Errorf(errTSDBCreateIncompatibleState, "PENDING"), userID).Error(), err.Error()) @@ -2739,7 +2735,7 @@ func TestIngesterPRW2_QueryStream(t *testing.T) { ctx := user.InjectOrgID(context.Background(), userID) lbls := labels.Labels{{Name: labels.MetricName, Value: "foo"}} var ( - req *cortexpbv2.WriteRequest + req *cortexpb.WriteRequestV2 expectedResponseChunks *client.QueryStreamResponse ) switch enc { @@ -2822,12 +2818,12 @@ func TestIngesterPRW2_QueryStreamManySamplesChunks(t *testing.T) { ctx := user.InjectOrgID(context.Background(), userID) const samplesCount = 1000000 - samples := make([]cortexpbv2.Sample, 0, samplesCount) + samples := make([]cortexpb.Sample, 0, samplesCount) for i := 0; i < samplesCount; i++ { - samples = append(samples, cortexpbv2.Sample{ - Value: float64(i), - Timestamp: int64(i), + samples = append(samples, cortexpb.Sample{ + Value: float64(i), + TimestampMs: int64(i), }) } @@ -2928,12 +2924,12 @@ func benchmarkQueryStreamV2(b *testing.B) { ctx := user.InjectOrgID(context.Background(), userID) const samplesCount = 1000 - samples := make([]cortexpbv2.Sample, 0, samplesCount) + samples := make([]cortexpb.Sample, 0, samplesCount) for i := 0; i < samplesCount; i++ { - samples = append(samples, cortexpbv2.Sample{ - Value: float64(i), - Timestamp: int64(i), + samples = append(samples, cortexpb.Sample{ + Value: float64(i), + TimestampMs: int64(i), }) } @@ -3995,22 +3991,22 @@ func TestIngesterPRW2_NoFlushWithInFlightRequest(t *testing.T) { func TestIngesterPRW2_PushInstanceLimits(t *testing.T) { tests := map[string]struct { limits InstanceLimits - reqs map[string][]*cortexpbv2.WriteRequest + reqs map[string][]*cortexpb.WriteRequestV2 expectedErr error expectedErrType interface{} }{ "should succeed creating one user and series": { limits: InstanceLimits{MaxInMemorySeries: 1, MaxInMemoryTenants: 1}, - reqs: map[string][]*cortexpbv2.WriteRequest{ + reqs: map[string][]*cortexpb.WriteRequestV2{ "test": { - cortexpbv2.ToWriteRequestV2( + cortexpb.ToWriteRequestV2( []labels.Labels{cortexpb.FromLabelAdaptersToLabels([]cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test"}})}, - []cortexpbv2.Sample{{Value: 1, Timestamp: 9}}, + []cortexpb.Sample{{Value: 1, TimestampMs: 9}}, nil, - []cortexpbv2.Metadata{ - {Type: cortexpbv2.METRIC_TYPE_COUNTER, HelpRef: 3}, + []cortexpb.MetadataV2{ + {Type: cortexpb.COUNTER, HelpRef: 3}, }, - cortexpbv2.API, + cortexpb.API, "a help for 
metric_name_1"), }, }, @@ -4019,21 +4015,21 @@ func TestIngesterPRW2_PushInstanceLimits(t *testing.T) { "should fail creating two series": { limits: InstanceLimits{MaxInMemorySeries: 1, MaxInMemoryTenants: 1}, - reqs: map[string][]*cortexpbv2.WriteRequest{ + reqs: map[string][]*cortexpb.WriteRequestV2{ "test": { - cortexpbv2.ToWriteRequestV2( + cortexpb.ToWriteRequestV2( []labels.Labels{cortexpb.FromLabelAdaptersToLabels([]cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test1"}})}, - []cortexpbv2.Sample{{Value: 1, Timestamp: 9}}, + []cortexpb.Sample{{Value: 1, TimestampMs: 9}}, nil, nil, - cortexpbv2.API), + cortexpb.API), - cortexpbv2.ToWriteRequestV2( + cortexpb.ToWriteRequestV2( []labels.Labels{cortexpb.FromLabelAdaptersToLabels([]cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test2"}})}, // another series - []cortexpbv2.Sample{{Value: 1, Timestamp: 10}}, + []cortexpb.Sample{{Value: 1, TimestampMs: 10}}, nil, nil, - cortexpbv2.API), + cortexpb.API), }, }, @@ -4042,23 +4038,23 @@ func TestIngesterPRW2_PushInstanceLimits(t *testing.T) { "should fail creating two users": { limits: InstanceLimits{MaxInMemorySeries: 1, MaxInMemoryTenants: 1}, - reqs: map[string][]*cortexpbv2.WriteRequest{ + reqs: map[string][]*cortexpb.WriteRequestV2{ "user1": { - cortexpbv2.ToWriteRequestV2( + cortexpb.ToWriteRequestV2( []labels.Labels{cortexpb.FromLabelAdaptersToLabels([]cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test1"}})}, - []cortexpbv2.Sample{{Value: 1, Timestamp: 9}}, + []cortexpb.Sample{{Value: 1, TimestampMs: 9}}, nil, nil, - cortexpbv2.API), + cortexpb.API), }, "user2": { - cortexpbv2.ToWriteRequestV2( + cortexpb.ToWriteRequestV2( []labels.Labels{cortexpb.FromLabelAdaptersToLabels([]cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test2"}})}, // another series - []cortexpbv2.Sample{{Value: 1, Timestamp: 10}}, + []cortexpb.Sample{{Value: 1, TimestampMs: 10}}, nil, nil, - cortexpbv2.API), + cortexpb.API), }, }, expectedErr: wrapWithUser(errMaxUsersLimitReached, "user2"), @@ -4066,21 +4062,21 @@ func TestIngesterPRW2_PushInstanceLimits(t *testing.T) { "should fail pushing samples in two requests due to rate limit": { limits: InstanceLimits{MaxInMemorySeries: 1, MaxInMemoryTenants: 1, MaxIngestionRate: 0.001}, - reqs: map[string][]*cortexpbv2.WriteRequest{ + reqs: map[string][]*cortexpb.WriteRequestV2{ "user1": { - cortexpbv2.ToWriteRequestV2( + cortexpb.ToWriteRequestV2( []labels.Labels{cortexpb.FromLabelAdaptersToLabels([]cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test1"}})}, - []cortexpbv2.Sample{{Value: 1, Timestamp: 9}}, + []cortexpb.Sample{{Value: 1, TimestampMs: 9}}, nil, nil, - cortexpbv2.API), + cortexpb.API), - cortexpbv2.ToWriteRequestV2( + cortexpb.ToWriteRequestV2( []labels.Labels{cortexpb.FromLabelAdaptersToLabels([]cortexpb.LabelAdapter{{Name: labels.MetricName, Value: "test1"}})}, - []cortexpbv2.Sample{{Value: 1, Timestamp: 10}}, + []cortexpb.Sample{{Value: 1, TimestampMs: 10}}, nil, nil, - cortexpbv2.API), + cortexpb.API), }, }, expectedErr: errMaxSamplesPushRateLimitReached, @@ -4229,30 +4225,30 @@ func TestIngesterPRW2_QueryExemplar_MaxInflightQueryRequest(t *testing.T) { require.Equal(t, err, errTooManyInflightQueryRequests) } -func generateSamplesForLabelV2(lbs labels.Labels, count int) *cortexpbv2.WriteRequest { +func generateSamplesForLabelV2(lbs labels.Labels, count int) *cortexpb.WriteRequestV2 { var lbls = make([]labels.Labels, 0, count) - var samples = make([]cortexpbv2.Sample, 0, count) + var samples = make([]cortexpb.Sample, 
0, count) for i := 0; i < count; i++ { - samples = append(samples, cortexpbv2.Sample{ - Value: float64(i), - Timestamp: int64(i), + samples = append(samples, cortexpb.Sample{ + Value: float64(i), + TimestampMs: int64(i), }) lbls = append(lbls, lbs) } - return cortexpbv2.ToWriteRequestV2(lbls, samples, nil, nil, cortexpbv2.API) + return cortexpb.ToWriteRequestV2(lbls, samples, nil, nil, cortexpb.API) } -func mockWriteRequestWithMetadataV2(t *testing.T, lbls labels.Labels, value float64, timestamp int64, metadata cortexpbv2.Metadata, additionalSymbols ...string) (*cortexpbv2.WriteRequest, *client.QueryStreamResponse) { - samples := []cortexpbv2.Sample{ +func mockWriteRequestWithMetadataV2(t *testing.T, lbls labels.Labels, value float64, timestamp int64, metadata cortexpb.MetadataV2, additionalSymbols ...string) (*cortexpb.WriteRequestV2, *client.QueryStreamResponse) { + samples := []cortexpb.Sample{ { - Timestamp: timestamp, - Value: value, + TimestampMs: timestamp, + Value: value, }, } - req := cortexpbv2.ToWriteRequestV2([]labels.Labels{lbls}, samples, nil, []cortexpbv2.Metadata{metadata}, cortexpbv2.API, additionalSymbols...) + req := cortexpb.ToWriteRequestV2([]labels.Labels{lbls}, samples, nil, []cortexpb.MetadataV2{metadata}, cortexpb.API, additionalSymbols...) chunk := chunkenc.NewXORChunk() app, err := chunk.Appender() @@ -4279,23 +4275,23 @@ func mockWriteRequestWithMetadataV2(t *testing.T, lbls labels.Labels, value floa return req, expectedQueryStreamResChunks } -func mockHistogramWriteRequestV2(t *testing.T, lbls labels.Labels, value int, timestampMs int64, float bool) (*cortexpbv2.WriteRequest, *client.QueryStreamResponse) { +func mockHistogramWriteRequestV2(t *testing.T, lbls labels.Labels, value int, timestampMs int64, float bool) (*cortexpb.WriteRequestV2, *client.QueryStreamResponse) { var ( - histograms []cortexpbv2.Histogram + histograms []cortexpb.Histogram h *histogram.Histogram fh *histogram.FloatHistogram c chunkenc.Chunk ) if float { fh = tsdbutil.GenerateTestFloatHistogram(value) - histograms = []cortexpbv2.Histogram{ - cortexpbv2.FloatHistogramToHistogramProto(timestampMs, fh), + histograms = []cortexpb.Histogram{ + cortexpb.FloatHistogramToHistogramProto(timestampMs, fh), } c = chunkenc.NewFloatHistogramChunk() } else { h = tsdbutil.GenerateTestHistogram(value) - histograms = []cortexpbv2.Histogram{ - cortexpbv2.HistogramToHistogramProto(timestampMs, h), + histograms = []cortexpb.Histogram{ + cortexpb.HistogramToHistogramProto(timestampMs, h), } c = chunkenc.NewHistogramChunk() } @@ -4310,7 +4306,7 @@ func mockHistogramWriteRequestV2(t *testing.T, lbls labels.Labels, value int, ti require.NoError(t, err) c.Compact() - req := cortexpbv2.ToWriteRequestV2([]labels.Labels{lbls}, nil, histograms, nil, cortexpbv2.API) + req := cortexpb.ToWriteRequestV2([]labels.Labels{lbls}, nil, histograms, nil, cortexpb.API) enc := int32(encoding.PrometheusHistogramChunk) if float { enc = int32(encoding.PrometheusFloatHistogramChunk) @@ -4334,15 +4330,15 @@ func mockHistogramWriteRequestV2(t *testing.T, lbls labels.Labels, value int, ti return req, expectedQueryStreamResChunks } -func mockWriteRequestV2(t *testing.T, lbls labels.Labels, value float64, timestamp int64) (*cortexpbv2.WriteRequest, *client.QueryStreamResponse) { - samples := []cortexpbv2.Sample{ +func mockWriteRequestV2(t *testing.T, lbls labels.Labels, value float64, timestamp int64) (*cortexpb.WriteRequestV2, *client.QueryStreamResponse) { + samples := []cortexpb.Sample{ { - Timestamp: timestamp, - Value: value, + 
TimestampMs: timestamp, + Value: value, }, } - req := cortexpbv2.ToWriteRequestV2([]labels.Labels{lbls}, samples, nil, nil, cortexpbv2.API) + req := cortexpb.ToWriteRequestV2([]labels.Labels{lbls}, samples, nil, nil, cortexpb.API) chunk := chunkenc.NewXORChunk() app, err := chunk.Appender() @@ -4371,8 +4367,8 @@ func mockWriteRequestV2(t *testing.T, lbls labels.Labels, value float64, timesta func pushSingleSampleWithMetadataV2(t *testing.T, i *Ingester) { ctx := user.InjectOrgID(context.Background(), userID) - metadata := cortexpbv2.Metadata{ - Type: cortexpbv2.METRIC_TYPE_COUNTER, + metadata := cortexpb.MetadataV2{ + Type: cortexpb.COUNTER, HelpRef: 3, UnitRef: 0, } @@ -4389,16 +4385,16 @@ func pushSingleSampleAtTimeV2(t *testing.T, i *Ingester, ts int64) { require.NoError(t, err) } -func writeRequestSingleSeriesV2(lbls labels.Labels, samples []cortexpbv2.Sample) *cortexpbv2.WriteRequest { - req := &cortexpbv2.WriteRequest{ - Source: cortexpbv2.API, +func writeRequestSingleSeriesV2(lbls labels.Labels, samples []cortexpb.Sample) *cortexpb.WriteRequestV2 { + req := &cortexpb.WriteRequestV2{ + Source: cortexpb.API, } st := writev2.NewSymbolTable() - ts := cortexpbv2.TimeSeries{} + ts := cortexpb.TimeSeriesV2{} ts.Samples = samples ts.LabelsRefs = st.SymbolizeLabels(lbls, nil) - req.Timeseries = append(req.Timeseries, cortexpbv2.PreallocTimeseriesV2{TimeSeries: &ts}) + req.Timeseries = append(req.Timeseries, cortexpb.PreallocTimeseriesV2{TimeSeriesV2: &ts}) req.Symbols = st.Symbols() return req @@ -4430,21 +4426,21 @@ func createIngesterWithSeriesV2(t testing.TB, userID string, numSeries, numSampl // Generate metrics and samples (1 for each series). metrics := make([]labels.Labels, 0, batchSize) - samples := make([]cortexpbv2.Sample, 0, batchSize) + samples := make([]cortexpb.Sample, 0, batchSize) for s := 0; s < batchSize; s++ { metrics = append(metrics, labels.Labels{ {Name: labels.MetricName, Value: fmt.Sprintf("test_%d", o+s)}, }) - samples = append(samples, cortexpbv2.Sample{ - Timestamp: ts, - Value: 1, + samples = append(samples, cortexpb.Sample{ + TimestampMs: ts, + Value: 1, }) } // Send metrics to the ingester. - req := cortexpbv2.ToWriteRequestV2(metrics, samples, nil, nil, cortexpbv2.API) + req := cortexpb.ToWriteRequestV2(metrics, samples, nil, nil, cortexpb.API) _, err := i.PushV2(ctx, req) require.NoError(t, err) } @@ -4452,17 +4448,3 @@ func createIngesterWithSeriesV2(t testing.TB, userID string, numSeries, numSampl return i } - -func benchmarkDataV2(nSeries int) (allLabels []labels.Labels, allSamples []cortexpbv2.Sample) { - for j := 0; j < nSeries; j++ { - labels := chunk.BenchmarkLabels.Copy() - for i := range labels { - if labels[i].Name == "cpu" { - labels[i].Value = fmt.Sprintf("cpu%02d", j) - } - } - allLabels = append(allLabels, labels) - allSamples = append(allSamples, cortexpbv2.Sample{Timestamp: 0, Value: float64(j)}) - } - return -} diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index 4a2916c2af..c4fa7e92f6 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -22,7 +22,6 @@ import ( "github.com/weaveworks/common/user" "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/cortexpbv2" "github.com/cortexproject/cortex/pkg/querier" "github.com/cortexproject/cortex/pkg/querier/stats" "github.com/cortexproject/cortex/pkg/ring/client" @@ -34,7 +33,7 @@ import ( // Pusher is an ingester server that accepts pushes. 
type Pusher interface { Push(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) - PushV2(ctx context.Context, req *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) + PushV2(ctx context.Context, req *cortexpb.WriteRequestV2) (*cortexpb.WriteResponseV2, error) } type PusherAppender struct { diff --git a/pkg/util/push/otlp_test.go b/pkg/util/push/otlp_test.go index 40b42f3fee..6032130283 100644 --- a/pkg/util/push/otlp_test.go +++ b/pkg/util/push/otlp_test.go @@ -352,7 +352,7 @@ func generateOTLPWriteRequest(t *testing.T) pmetricotlp.ExportRequest { return pmetricotlp.NewExportRequestFromMetrics(d) } -func verifyOTLPWriteRequestHandler(t *testing.T, expectSource cortexpb.WriteRequest_SourceEnum) func(ctx context.Context, request *cortexpb.WriteRequest) (response *cortexpb.WriteResponse, err error) { +func verifyOTLPWriteRequestHandler(t *testing.T, expectSource cortexpb.SourceEnum) func(ctx context.Context, request *cortexpb.WriteRequest) (response *cortexpb.WriteResponse, err error) { t.Helper() return func(ctx context.Context, request *cortexpb.WriteRequest) (response *cortexpb.WriteResponse, err error) { assert.Len(t, request.Timeseries, 13) // 1 (target_info) + 1 (counter) + 1 (gauge) + 7 (hist_bucket) + 2 (hist_sum, hist_count) + 1 (exponential histogram) diff --git a/pkg/util/push/push.go b/pkg/util/push/push.go index c115f559ed..d8cbd79e88 100644 --- a/pkg/util/push/push.go +++ b/pkg/util/push/push.go @@ -14,7 +14,6 @@ import ( "github.com/weaveworks/common/middleware" "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/cortexpbv2" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/log" ) @@ -36,7 +35,7 @@ const ( type Func func(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) // FuncV2 defines the type of the pushV2. It is similar to http.HandlerFunc. -type FuncV2 func(ctx context.Context, request *cortexpbv2.WriteRequest) (*cortexpbv2.WriteResponse, error) +type FuncV2 func(ctx context.Context, request *cortexpb.WriteRequestV2) (*cortexpb.WriteResponseV2, error) // Handler is a http.Handler which accepts WriteRequests. 
func Handler(maxRecvMsgSize int, sourceIPs *middleware.SourceIPExtractor, push Func, pushV2 FuncV2) http.Handler { @@ -107,7 +106,7 @@ func Handler(maxRecvMsgSize int, sourceIPs *middleware.SourceIPExtractor, push F http.Error(w, string(resp.Body), int(resp.Code)) } case config.RemoteWriteProtoMsgV2: - var req cortexpbv2.WriteRequest + var req cortexpb.WriteRequestV2 err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRecvMsgSize, &req, util.RawSnappy) if err != nil { fmt.Println("err", err) @@ -118,7 +117,7 @@ func Handler(maxRecvMsgSize int, sourceIPs *middleware.SourceIPExtractor, push F req.SkipLabelNameValidation = false if req.Source == 0 { - req.Source = cortexpbv2.API + req.Source = cortexpb.API } if resp, err := pushV2(ctx, &req); err != nil { diff --git a/pkg/util/push/push_test.go b/pkg/util/push/push_test.go index 178f2346c9..e4839cec93 100644 --- a/pkg/util/push/push_test.go +++ b/pkg/util/push/push_test.go @@ -16,11 +16,10 @@ import ( "github.com/weaveworks/common/middleware" "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/cortexpbv2" ) func TestHandler_remoteWrite(t *testing.T) { - handler := Handler(100000, nil, verifyWriteRequestHandler(t, cortexpb.API), verifyWriteRequestV2Handler(t, cortexpbv2.API)) + handler := Handler(100000, nil, verifyWriteRequestHandler(t, cortexpb.API), verifyWriteRequestV2Handler(t, cortexpb.API)) t.Run("remote write v1", func(t *testing.T) { req := createRequest(t, createPrometheusRemoteWriteProtobuf(t), false) @@ -45,7 +44,7 @@ func TestHandler_remoteWrite(t *testing.T) { func TestHandler_ContentTypeAndEncoding(t *testing.T) { sourceIPs, _ := middleware.NewSourceIPs("SomeField", "(.*)") - handler := Handler(100000, sourceIPs, verifyWriteRequestHandler(t, cortexpb.API), verifyWriteRequestV2Handler(t, cortexpbv2.API)) + handler := Handler(100000, sourceIPs, verifyWriteRequestHandler(t, cortexpb.API), verifyWriteRequestV2Handler(t, cortexpb.API)) tests := []struct { description string @@ -149,7 +148,7 @@ func TestHandler_ContentTypeAndEncoding(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { if test.isV2 { - req := createRequestWithHeaders(t, test.reqHeaders, createCortexRemoteWriteV2Protobuf(t, false, cortexpbv2.API)) + req := createRequestWithHeaders(t, test.reqHeaders, createCortexRemoteWriteV2Protobuf(t, false, cortexpb.API)) resp := httptest.NewRecorder() handler.ServeHTTP(resp, req) assert.Equal(t, test.expectedCode, resp.Code) @@ -165,7 +164,7 @@ func TestHandler_ContentTypeAndEncoding(t *testing.T) { func TestHandler_cortexWriteRequest(t *testing.T) { sourceIPs, _ := middleware.NewSourceIPs("SomeField", "(.*)") - handler := Handler(100000, sourceIPs, verifyWriteRequestHandler(t, cortexpb.RULE), verifyWriteRequestV2Handler(t, cortexpbv2.RULE)) + handler := Handler(100000, sourceIPs, verifyWriteRequestHandler(t, cortexpb.RULE), verifyWriteRequestV2Handler(t, cortexpb.RULE)) t.Run("remote write v1", func(t *testing.T) { req := createRequest(t, createCortexWriteRequestProtobuf(t, false, cortexpb.RULE), false) @@ -174,7 +173,7 @@ func TestHandler_cortexWriteRequest(t *testing.T) { assert.Equal(t, 200, resp.Code) }) t.Run("remote write v2", func(t *testing.T) { - req := createRequest(t, createCortexRemoteWriteV2Protobuf(t, false, cortexpbv2.RULE), true) + req := createRequest(t, createCortexRemoteWriteV2Protobuf(t, false, cortexpb.RULE), true) resp := httptest.NewRecorder() handler.ServeHTTP(resp, req) assert.Equal(t, 200, resp.Code) @@ -185,25 +184,25 
@@ func TestHandler_ignoresSkipLabelNameValidationIfSet(t *testing.T) { for _, req := range []*http.Request{ createRequest(t, createCortexWriteRequestProtobuf(t, true, cortexpb.RULE), false), createRequest(t, createCortexWriteRequestProtobuf(t, false, cortexpb.RULE), false), - createRequest(t, createCortexRemoteWriteV2Protobuf(t, true, cortexpbv2.RULE), true), - createRequest(t, createCortexRemoteWriteV2Protobuf(t, false, cortexpbv2.RULE), true), + createRequest(t, createCortexRemoteWriteV2Protobuf(t, true, cortexpb.RULE), true), + createRequest(t, createCortexRemoteWriteV2Protobuf(t, false, cortexpb.RULE), true), } { resp := httptest.NewRecorder() - handler := Handler(100000, nil, verifyWriteRequestHandler(t, cortexpb.RULE), verifyWriteRequestV2Handler(t, cortexpbv2.RULE)) + handler := Handler(100000, nil, verifyWriteRequestHandler(t, cortexpb.RULE), verifyWriteRequestV2Handler(t, cortexpb.RULE)) handler.ServeHTTP(resp, req) assert.Equal(t, 200, resp.Code) } } -func verifyWriteRequestV2Handler(t *testing.T, expectSource cortexpbv2.WriteRequest_SourceEnum) func(ctx context.Context, request *cortexpbv2.WriteRequest) (response *cortexpbv2.WriteResponse, err error) { +func verifyWriteRequestV2Handler(t *testing.T, expectSource cortexpb.SourceEnum) func(ctx context.Context, request *cortexpb.WriteRequestV2) (response *cortexpb.WriteResponseV2, err error) { t.Helper() - return func(ctx context.Context, request *cortexpbv2.WriteRequest) (response *cortexpbv2.WriteResponse, err error) { + return func(ctx context.Context, request *cortexpb.WriteRequestV2) (response *cortexpb.WriteResponseV2, err error) { assert.Len(t, request.Timeseries, 1) assert.Equal(t, "__name__", request.Symbols[1]) assert.Equal(t, "foo", request.Symbols[2]) assert.Equal(t, expectSource, request.Source) assert.False(t, request.SkipLabelNameValidation) - resp := &cortexpbv2.WriteResponse{ + resp := &cortexpb.WriteResponseV2{ Samples: 1, Histograms: 1, Exemplars: 1, @@ -212,7 +211,7 @@ func verifyWriteRequestV2Handler(t *testing.T, expectSource cortexpbv2.WriteRequ } } -func verifyWriteRequestHandler(t *testing.T, expectSource cortexpb.WriteRequest_SourceEnum) func(ctx context.Context, request *cortexpb.WriteRequest) (response *cortexpb.WriteResponse, err error) { +func verifyWriteRequestHandler(t *testing.T, expectSource cortexpb.SourceEnum) func(ctx context.Context, request *cortexpb.WriteRequest) (response *cortexpb.WriteResponse, err error) { t.Helper() return func(ctx context.Context, request *cortexpb.WriteRequest) (response *cortexpb.WriteResponse, err error) { assert.Len(t, request.Timeseries, 1) @@ -255,16 +254,16 @@ func createRequest(t *testing.T, protobuf []byte, isV2 bool) *http.Request { return req } -func createCortexRemoteWriteV2Protobuf(t *testing.T, skipLabelNameValidation bool, source cortexpbv2.WriteRequest_SourceEnum) []byte { +func createCortexRemoteWriteV2Protobuf(t *testing.T, skipLabelNameValidation bool, source cortexpb.SourceEnum) []byte { t.Helper() - input := cortexpbv2.WriteRequest{ + input := cortexpb.WriteRequestV2{ Symbols: []string{"", "__name__", "foo"}, - Timeseries: []cortexpbv2.PreallocTimeseriesV2{ + Timeseries: []cortexpb.PreallocTimeseriesV2{ { - TimeSeries: &cortexpbv2.TimeSeries{ + TimeSeriesV2: &cortexpb.TimeSeriesV2{ LabelsRefs: []uint32{1, 2}, - Samples: []cortexpbv2.Sample{ - {Value: 1, Timestamp: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()}, + Samples: []cortexpb.Sample{ + {Value: 1, TimestampMs: time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC).UnixNano()}, }, }, }, @@ 
-315,7 +314,7 @@ func createPrometheusRemoteWriteProtobuf(t *testing.T) []byte { require.NoError(t, err) return inoutBytes } -func createCortexWriteRequestProtobuf(t *testing.T, skipLabelNameValidation bool, source cortexpb.WriteRequest_SourceEnum) []byte { +func createCortexWriteRequestProtobuf(t *testing.T, skipLabelNameValidation bool, source cortexpb.SourceEnum) []byte { t.Helper() ts := cortexpb.PreallocTimeseries{ TimeSeries: &cortexpb.TimeSeries{ diff --git a/pkg/util/validation/validate.go b/pkg/util/validation/validate.go index dda50dddb3..292fd273cd 100644 --- a/pkg/util/validation/validate.go +++ b/pkg/util/validation/validate.go @@ -17,7 +17,6 @@ import ( "github.com/weaveworks/common/httpgrpc" "github.com/cortexproject/cortex/pkg/cortexpb" - "github.com/cortexproject/cortex/pkg/cortexpbv2" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/extract" ) @@ -154,7 +153,7 @@ func ValidateSampleTimestamp(validateMetrics *ValidateMetrics, limits *Limits, u // ValidateExemplarV2 returns an error if the exemplar is invalid. // The returned error may retain the provided series labels. -func ValidateExemplarV2(validateMetrics *ValidateMetrics, symbols []string, userID string, seriesLabels []cortexpb.LabelAdapter, e *cortexpbv2.Exemplar, b labels.ScratchBuilder, st *writev2.SymbolsTable) ValidationError { +func ValidateExemplarV2(validateMetrics *ValidateMetrics, symbols []string, userID string, seriesLabels []cortexpb.LabelAdapter, e *cortexpb.ExemplarV2, b labels.ScratchBuilder, st *writev2.SymbolsTable) ValidationError { lbs := e.ToLabels(&b, symbols) // symbolize examplar labels e.LabelsRefs = st.SymbolizeLabels(lbs, nil) @@ -290,7 +289,7 @@ func ValidateLabels(validateMetrics *ValidateMetrics, limits *Limits, userID str } // ValidateMetadata returns an err if a metric metadata is invalid. -func ValidateMetadataV2(validateMetrics *ValidateMetrics, cfg *Limits, userID string, symbols []string, metadata *cortexpbv2.Metadata, st *writev2.SymbolsTable) error { +func ValidateMetadataV2(validateMetrics *ValidateMetrics, cfg *Limits, userID string, symbols []string, metadata *cortexpb.MetadataV2, st *writev2.SymbolsTable) error { help := symbols[metadata.HelpRef] unit := symbols[metadata.UnitRef] @@ -353,67 +352,6 @@ func ValidateMetadata(validateMetrics *ValidateMetrics, cfg *Limits, userID stri return nil } -func ValidateNativeHistogramV2(validateMetrics *ValidateMetrics, limits *Limits, userID string, ls []cortexpb.LabelAdapter, histogramSample cortexpbv2.Histogram) (cortexpbv2.Histogram, error) { - if limits.MaxNativeHistogramBuckets == 0 { - return histogramSample, nil - } - - var ( - exceedLimit bool - ) - if histogramSample.IsFloatHistogram() { - // Initial check to see if the bucket limit is exceeded or not. If not, we can avoid type casting. - exceedLimit = len(histogramSample.PositiveCounts)+len(histogramSample.NegativeCounts) > limits.MaxNativeHistogramBuckets - if !exceedLimit { - return histogramSample, nil - } - // Exceed limit. 
- if histogramSample.Schema <= histogram.ExponentialSchemaMin { - validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramBucketCountLimitExceeded, userID).Inc() - return cortexpbv2.Histogram{}, newHistogramBucketLimitExceededError(ls, limits.MaxNativeHistogramBuckets) - } - fh := cortexpbv2.FloatHistogramProtoToFloatHistogram(histogramSample) - oBuckets := len(fh.PositiveBuckets) + len(fh.NegativeBuckets) - for len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > limits.MaxNativeHistogramBuckets { - if fh.Schema <= histogram.ExponentialSchemaMin { - validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramBucketCountLimitExceeded, userID).Inc() - return cortexpbv2.Histogram{}, newHistogramBucketLimitExceededError(ls, limits.MaxNativeHistogramBuckets) - } - fh = fh.ReduceResolution(fh.Schema - 1) - } - if oBuckets != len(fh.PositiveBuckets)+len(fh.NegativeBuckets) { - validateMetrics.HistogramSamplesReducedResolution.WithLabelValues(userID).Inc() - } - // If resolution reduced, convert new float histogram to protobuf type again. - return cortexpbv2.FloatHistogramToHistogramProto(histogramSample.Timestamp, fh), nil - } - - // Initial check to see if bucket limit is exceeded or not. If not, we can avoid type casting. - exceedLimit = len(histogramSample.PositiveDeltas)+len(histogramSample.NegativeDeltas) > limits.MaxNativeHistogramBuckets - if !exceedLimit { - return histogramSample, nil - } - // Exceed limit. - if histogramSample.Schema <= histogram.ExponentialSchemaMin { - validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramBucketCountLimitExceeded, userID).Inc() - return cortexpbv2.Histogram{}, newHistogramBucketLimitExceededError(ls, limits.MaxNativeHistogramBuckets) - } - h := cortexpbv2.HistogramProtoToHistogram(histogramSample) - oBuckets := len(h.PositiveBuckets) + len(h.NegativeBuckets) - for len(h.PositiveBuckets)+len(h.NegativeBuckets) > limits.MaxNativeHistogramBuckets { - if h.Schema <= histogram.ExponentialSchemaMin { - validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramBucketCountLimitExceeded, userID).Inc() - return cortexpbv2.Histogram{}, newHistogramBucketLimitExceededError(ls, limits.MaxNativeHistogramBuckets) - } - h = h.ReduceResolution(h.Schema - 1) - } - if oBuckets != len(h.PositiveBuckets)+len(h.NegativeBuckets) { - validateMetrics.HistogramSamplesReducedResolution.WithLabelValues(userID).Inc() - } - // If resolution reduced, convert new histogram to protobuf type again. - return cortexpbv2.HistogramToHistogramProto(histogramSample.Timestamp, h), nil -} - func ValidateNativeHistogram(validateMetrics *ValidateMetrics, limits *Limits, userID string, ls []cortexpb.LabelAdapter, histogramSample cortexpb.Histogram) (cortexpb.Histogram, error) { if limits.MaxNativeHistogramBuckets == 0 { return histogramSample, nil